Example #1
def main(flags):
    city_list = ['austin', 'chicago', 'kitsap', 'tyrol-w', 'vienna']
    flags.llh_file_dir = flags.llh_file_dir.format(flags.finetune_city)
    weight = np.load(flags.llh_file_dir)

    # make network
    # define place holder
    X = tf.placeholder(tf.float32, shape=[None, flags.input_size[0], flags.input_size[1], 3], name='X')
    y = tf.placeholder(tf.int32, shape=[None, flags.input_size[0], flags.input_size[1], 1], name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelCrop({'X': X, 'Y': y},
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)          # Inria GT has values 0 and 255; map them back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif', 'Map GT to (0, 1)', [3], opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])         # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr([0, 1, 2, 4],  # extract RGB (channels 0-2) and the remapped GT (channel 4)
                                                    cSize=flags.input_size,  # patch size, e.g. 572x572
                                                    numPixOverlap=int(model.get_overlap()/2),  # half the model overlap, e.g. 92
                                                    extSave=['jpg', 'jpg', 'jpg', 'png'],  # save RGB as jpg and GT as png
                                                    isTrain=True,
                                                    gtInd=3,
                                                    pad=model.get_overlap())  # pad around the tiles
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'city')
    idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt', 'force_tile')
    idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]
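    # each patch now has a single integer key: tile index * 10 + city index;
    # this is unique because there are only 5 cities (indices 0-4, in the order
    # of city_list above: austin, chicago, kitsap, tyrol-w, vienna)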
    # train on tiles 6-36 of the non-fine-tune cities; validate on tiles 1-5 of the fine-tune city
    filter_train = []
    filter_valid = []
    for i in range(5):
        for j in range(1, 37):
            if i != flags.finetune_city and j > 5:
                filter_train.append(j * 10 + i)
            elif i == flags.finetune_city and j <= 5:
                filter_valid.append(j * 10 + i)
    file_list_train = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_train)
    file_list_valid = uabCrossValMaker.make_file_list_by_key(idx, file_list, filter_valid)

    dataReader_train = uabDataReader.ImageLabelReaderPatchSampleControl(
        [3], [0, 1, 2], patchDir, file_list_train, flags.input_size, flags.batch_size,
        weight, dataAug='flip,rotate', block_mean=np.append([0], img_mean))
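    # note: ImageLabelReaderPatchSampleControl draws training patches according to
    # the per-patch probabilities in `weight` (loaded from llh_file_dir above)
    # rather than uniformly; Example #3 below simulates exactly this sampling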
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2], patchDir, file_list_valid, flags.input_size,
                                                      flags.batch_size, dataAug=' ',
                                                      block_mean=np.append([0], img_mean), batch_code=0)

    # train
    start_time = time.time()
    model.train_config('X', 'Y', flags.n_train, flags.n_valid, flags.input_size, uabRepoPaths.modelPath,
                       loss_type='xent', par_dir='Inria_Domain_Selection')
    model.run(train_reader=dataReader_train,
              valid_reader=dataReader_valid,
              pretrained_model_dir=flags.pred_model_dir.format(flags.finetune_city),
              isTrain=True,
              img_mean=img_mean,
              verb_step=100,  # print a progress message every 100 steps
              save_epoch=5,  # save the model every 5 epochs
              gpu=GPU,
              tile_size=flags.tile_size,
              patch_size=flags.input_size
              )

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration/60/60))
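
The examples assume a `flags` namespace carrying the hyperparameters referenced above. A minimal sketch (not taken from the original scripts; every default below is a placeholder) of how such an object could be built with argparse:

import argparse

def parse_flags():
    parser = argparse.ArgumentParser()
    parser.add_argument('--finetune-city', dest='finetune_city', type=int, default=0)
    parser.add_argument('--llh-file-dir', dest='llh_file_dir', type=str, default='llh_{}.npy')
    parser.add_argument('--model-name', dest='model_name', type=str, default='unet_finetune')
    parser.add_argument('--batch-size', dest='batch_size', type=int, default=5)
    parser.add_argument('--learning-rate', dest='learning_rate', type=float, default=1e-4)
    parser.add_argument('--decay-step', dest='decay_step', type=int, default=60)
    parser.add_argument('--decay-rate', dest='decay_rate', type=float, default=0.1)
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--sfn', type=int, default=32)
    parser.add_argument('--num-classes', dest='num_classes', type=int, default=2)
    # n_train, n_valid, tile_size, pred_model_dir etc. follow the same pattern
    flags = parser.parse_args()
    flags.input_size = (572, 572)  # indexed as input_size[0] / input_size[1] above
    return flags

if __name__ == '__main__':
    main(parse_flags())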
Example #2
def main(flags):
    flags.llh_file_dir = flags.llh_file_dir.format(flags.finetune_city)
    weight = np.load(flags.llh_file_dir)

    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelGAN_V3Shrink(
        {
            'X': X,
            'Y': y
        },
        trainable=mode,
        model_name=flags.model_name,
        input_size=flags.input_size,
        batch_size=flags.batch_size,
        learn_rate=flags.learning_rate,
        decay_step=flags.decay_step,
        decay_rate=flags.decay_rate,
        epochs=flags.epochs,
        start_filter_num=flags.sfn,
        pad=flags.pad,
    )
    model.create_graph(['X', 'Y'], class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(255)
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                     'Map GT to (0, 1)', [3],
                                                     opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 4],
        cSize=flags.input_size,
        numPixOverlap=int(model.get_overlap()),
        extSave=['jpg', 'jpg', 'jpg', 'png'],
        isTrain=True,
        gtInd=3,
        pad=model.get_overlap() // 2)
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx_city, file_list = uabCrossValMaker.uabUtilGetFolds(
        patchDir, 'fileList.txt', 'city')
    idx_tile, _ = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                   'force_tile')
    idx = [j * 10 + i for i, j in zip(idx_city, idx_tile)]
    # train on tiles 6-36 of the other cities; tiles 6-36 of the fine-tune city
    # form the adaptation target set; validate on tiles 1-5 of the fine-tune city
    filter_train = []
    filter_train_target = []
    filter_valid = []
    for i in range(5):
        for j in range(1, 37):
            if i != flags.finetune_city and j > 5:
                filter_train.append(j * 10 + i)
            elif i == flags.finetune_city and j > 5:
                filter_train_target.append(j * 10 + i)
            elif i == flags.finetune_city and j <= 5:
                filter_valid.append(j * 10 + i)
    file_list_train = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, filter_train)
    file_list_train_target = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, filter_train_target)
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, filter_valid)

    dataReader_train = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                      patchDir,
                                                      file_list_train,
                                                      flags.input_size,
                                                      flags.batch_size,
                                                      dataAug='flip,rotate',
                                                      block_mean=np.append(
                                                          [0], img_mean),
                                                      batch_code=0)
    dataReader_train_source = uabDataReader.ImageLabelReaderPatchSampleControl(
        [3], [0, 1, 2],
        patchDir,
        file_list_train,
        flags.input_size,
        flags.batch_size,
        weight,
        dataAug='flip,rotate',
        block_mean=np.append([0], img_mean))
    dataReader_train_target = uabDataReader.ImageLabelReader(
        [3], [0, 1, 2],
        patchDir,
        file_list_train_target,
        flags.input_size,
        flags.batch_size,
        dataAug='flip,rotate',
        block_mean=np.append([0], img_mean),
        batch_code=0)
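    # the three training readers feed the GAN-style UnetModelGAN_V3Shrink:
    # `dataReader_train` cycles over the source-city patches, `dataReader_train_source`
    # re-samples those same patches weighted by `weight`, and `dataReader_train_target`
    # supplies patches from tiles 6-36 of the held-out fine-tune city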
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                      patchDir,
                                                      file_list_valid,
                                                      flags.input_size,
                                                      flags.batch_size,
                                                      dataAug=' ',
                                                      block_mean=np.append(
                                                          [0], img_mean),
                                                      batch_code=0)

    # train
    start_time = time.time()
    model.load_weights(flags.pred_model_dir.format(flags.finetune_city),
                       layers2load='1,2,3,4,5,6,7,8,9',
                       load_final_layer=True)
    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent',
                       par_dir='Inria_GAN/V3LOO')
    model.run(
        train_reader=dataReader_train,
        train_reader_source=dataReader_train_source,
        train_reader_target=dataReader_train_target,
        valid_reader=dataReader_valid,
        pretrained_model_dir=None,  # encoder weights were already loaded via load_weights() above
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a progress message every 100 steps
        save_epoch=flags.save_epoch,  # save the model every save_epoch epochs
        gpu=GPU,
        tile_size=flags.tile_size,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
Example #3
idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                  'force_tile')
# use first 5 tiles for validation
file_list_train = uabCrossValMaker.make_file_list_by_key(
    idx, file_list, [i for i in range(6, 37)])
file_list_valid = uabCrossValMaker.make_file_list_by_key(
    idx, file_list, [i for i in range(0, 6)])

patch_id_dict = {}
for cnt, item in enumerate(file_list_train):
    p_name = '_'.join(item[0].split('_')[:2])
    patch_id_dict[p_name] = cnt
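# patch_id_dict maps each file-name prefix (the first two '_'-separated tokens)
# to an index in file_list_train, so the sampling check below can count draws
# per prefix under the patch_prob distribution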

dataReader_train = uabDataReader.ImageLabelReaderPatchSampleControl(
    [3], [0, 1, 2],
    patchDir,
    file_list_train, (321, 321),
    100,
    patch_prob,
    patch_name=True,
    block_mean=np.append([0], img_mean))

patch_cnt = np.zeros(len(file_list_train), dtype=np.uint64)
c_cnt = np.zeros(5)
city_dict = {'aus': 0, 'chi': 1, 'kit': 2, 'tyr': 3, 'vie': 4}
for reader_cnt in tqdm(range(100000)):
    idx_batch = np.random.choice(len(file_list_train), 100, p=patch_prob)
    for i in idx_batch:
        row = file_list_train[i]
        p_name = '_'.join(row[0].split('_')[:2])
        c_cnt[city_dict[p_name[:3]]] += 1
        patch_cnt[patch_id_dict[p_name]] += 1
plt.bar(np.arange(5), c_cnt)
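# (not part of the original snippet) label the per-city draw counts and show
# the chart; the bar order follows the city indices in city_dict above
plt.xticks(np.arange(5), ['aus', 'chi', 'kit', 'tyr', 'vie'])
plt.xlabel('city')
plt.ylabel('# patches drawn')
plt.show()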
Example #4
def main(flags):
    patch_prior = np.load(
        os.path.join(flags.group_file_dir,
                     'unet_{}_patch_prior.npy'.format(flags.train_city)))

    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')
    model = uabMakeNetwork_UNet.UnetModelCrop({
        'X': X,
        'Y': y
    },
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection('inria')
    opDetObj = bPreproc.uabOperTileDivide(
        255)  # Inria GT has values 0 and 255; map them back to 0 and 1
    # [3] is the channel id of GT
    rescObj = uabPreprocClasses.uabPreprocMultChanOp([], 'GT_Divide.tif',
                                                     'Map GT to (0, 1)', [3],
                                                     opDetObj)
    rescObj.run(blCol)
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 4],
        cSize=flags.input_size,
        numPixOverlap=int(model.get_overlap()),
        extSave=['jpg', 'jpg', 'jpg', 'png'],
        isTrain=True,
        gtInd=3,
        pad=model.get_overlap() // 2)  # integer half-overlap padding
    patchDir = extrObj.run(blCol)

    # make data reader
    # use uabCrossValMaker to get fileLists for training and validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'force_tile')
    # use first 5 tiles for validation
    file_list_train = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [i for i in range(6, 37)])
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [i for i in range(0, 6)])

    dataReader_train = uabDataReader.ImageLabelReaderPatchSampleControl(
        [3], [0, 1, 2],
        patchDir,
        file_list_train,
        flags.input_size,
        flags.batch_size,
        patch_prior,
        dataAug='flip,rotate',
        block_mean=np.append([0], img_mean))
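    # `patch_prior` plays the same role as `weight` in Example #1: the per-patch
    # sampling distribution used by ImageLabelReaderPatchSampleControl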
    # no augmentation needed for validation
    dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                      patchDir,
                                                      file_list_valid,
                                                      flags.input_size,
                                                      flags.batch_size,
                                                      dataAug=' ',
                                                      block_mean=np.append(
                                                          [0], img_mean),
                                                      batch_code=2)

    # train
    start_time = time.time()

    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent',
                       par_dir='Inria_Domain_LOO')
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=PRED_DIR,
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a progress message every 100 steps
        save_epoch=5,  # save the model every 5 epochs
        gpu=GPU,
        tile_size=flags.tile_size,
        patch_size=flags.input_size,
    )

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))