# Example #1 (score: 0)
def plot_patch_effect(res_dir, input_sizes, name, appendix='.npy', savefig=True):
    """Plot mean IoU and inference time against patch size.

    For every entry in ``input_sizes`` the file ``<size><appendix>`` is
    loaded from ``res_dir``; each file holds a dict of per-tile IoUs plus
    a 'time' entry.  The 'kitsap4' tile is excluded from the IoU average.
    """
    n_sizes = len(input_sizes)
    iou_record_all = np.zeros(n_sizes)
    time_record_all = np.zeros(n_sizes)
    for size_idx, size in enumerate(input_sizes):
        result_path = os.path.join(res_dir, '{}{}'.format(size, appendix))
        data = dict(np.load(result_path).tolist())
        tile_ious = []
        for key in data.keys():
            if key == 'time':
                time_record_all[size_idx] = data[key]
            elif key != 'kitsap4':  # kitsap4 is excluded from the average
                tile_ious.append(data[key] * 100)
        iou_record_all[size_idx] = np.mean(tile_ious)

    # two stacked panels sharing the patch-size axis
    fig = plt.figure(figsize=(8, 4))
    matplotlib.rcParams.update({'font.size': 14})
    plt.subplot(211)
    plt.plot(np.array(input_sizes), iou_record_all)
    plt.xticks([], [])
    plt.ylabel('IoU')
    plt.title(name)
    plt.subplot(212)
    plt.plot(np.array(input_sizes), time_record_all)
    plt.xticks(input_sizes, input_sizes)
    plt.xlabel('Patch Size')
    plt.ylabel('Time:s')
    fig.tight_layout()
    if savefig:
        img_dir, _ = sis_utils.get_task_img_folder()
        plt.savefig(os.path.join(img_dir, 'paper2_{}.png'.format(name)))
    plt.show()
def get_dirs():
    """Return the dictionary of directories used throughout this script."""
    img_dir, task_dir = sis_utils.get_task_img_folder()
    conf_dir = (
        r'/media/ei-edl01/user/bh163/tasks/2018.11.16.transmission_line/'
        r'confmap_uab_UnetCrop_lines_pw30_0_PS(572, 572)_BS5_EP100_LR0.0001_DS60_DR0.1_SFN32')
    return {
        'task': task_dir,
        'image': img_dir,
        'raw': r'/home/lab/Documents/bohao/data/transmission_line/raw',
        'conf': conf_dir,
        'line': r'/media/ei-edl01/data/uab_datasets/lines/data/Original_Tiles',
    }
def get_ious_patch_size_224(flags, patch_size_list):
    """Evaluate models trained at several patch sizes, testing at 224x224.

    For each size in ``patch_size_list`` the matching no-random U-Net model
    is run through ``sis_utils.test_unet`` and the result is cached in the
    task folder as ``<model_name>_224.npy``.
    """
    for patch_size in patch_size_list:
        model_name = 'UNET_PS-{}__BS-{}__E-100__NT-8000__DS-60__CT-__no_random'.format(
            patch_size, 1)
        print(model_name)

        result = sis_utils.test_unet(flags.rsr_data_dir, flags.test_data_dir,
                                     (224, 224), model_name, flags.num_classes,
                                     flags.ckdir, flags.city_name,
                                     flags.batch_size)
        print(result)

        _, task_dir = sis_utils.get_task_img_folder()
        save_name = '{}_{}.npy'.format(model_name, 224)
        np.save(os.path.join(task_dir, save_name), result)
def evaluate_results(flags, model_name, height_mode):
    """Run height-aware U-Net evaluation and cache the result.

    The evaluation is delegated to ``sis_utils.test_authentic_unet_height``
    on the 'urban_mapper' dataset; the returned result is saved to the
    task folder as ``<model_name>.npy``.
    """
    result = sis_utils.test_authentic_unet_height(flags.rsr_data_dir,
                                                  flags.test_data_dir,
                                                  flags.input_size,
                                                  model_name,
                                                  flags.num_classes,
                                                  flags.ckdir,
                                                  flags.city_name,
                                                  flags.batch_size,
                                                  ds_name='urban_mapper',
                                                  height_mode=height_mode)
    print(result)

    _, task_dir = sis_utils.get_task_img_folder()
    save_name = '{}.npy'.format(model_name)
    np.save(os.path.join(task_dir, save_name), result)
def get_ious(flags):
    """Evaluate the no-random U-Net at the configured input size.

    The model name is derived from ``flags.input_size`` with a fixed
    training batch size of 1; the result is cached in the task folder as
    ``<model_name>_<input_size>.npy``.
    """
    for batch_size in [1]:
        model_name = 'UNET_PS-{}__BS-{}__E-100__NT-8000__DS-60__CT-__no_random'.format(
            flags.input_size[0], batch_size)
        print(model_name)

        result = sis_utils.test_unet(flags.rsr_data_dir, flags.test_data_dir,
                                     flags.input_size, model_name,
                                     flags.num_classes, flags.ckdir,
                                     flags.city_name, flags.batch_size)
        print(result)

        _, task_dir = sis_utils.get_task_img_folder()
        save_name = '{}_{}.npy'.format(model_name, flags.input_size)
        np.save(os.path.join(task_dir, save_name), result)
def compute_distance_tile(set_1, set_2, patch_size, patch_num):
    """Find, per validation tile, the closest training patch by feature distance.

    ResNet50 (ImageNet weights, top included) feature vectors are extracted
    for ``patch_num`` sparse patches of every tile in ``set_1`` (training)
    and ``set_2`` (validation); both sets are cached as .npy files in the
    task folder and reloaded when present.  For each group of ``patch_num``
    validation vectors (i.e. one tile) the squared L2 distances to all
    training vectors are accumulated, and the minimum distance plus the
    index of the closest training vector are recorded.

    Args:
        set_1: iterable of training tile file paths.
        set_2: iterable of validation tile file paths.
        patch_size: patch side length passed to sparse_patchify.
        patch_num: number of patches extracted per tile.

    Returns:
        (dist_list, tile_id_list): per-validation-tile minimum aggregated
        distance and the argmin index into the training vectors.
    """
    _, task_dir = sis_utils.get_task_img_folder()
    res50 = keras.applications.resnet50.ResNet50(include_top=True,
                                                 weights='imagenet')
    train_vectors = []
    valid_vectors = []
    train_vector_name = os.path.join(
        task_dir, 'train_l{}_ps{}_pn{}.npy'.format(len(set_1), patch_size,
                                                   patch_num))
    valid_vector_name = os.path.join(
        task_dir, 'valid_l{}_ps{}_pn{}.npy'.format(len(set_2), patch_size,
                                                   patch_num))

    if not os.path.exists(train_vector_name):
        print('Extract vectors from set 1...')
        for file in tqdm(set_1):
            patches = sparse_patchify(file, patch_size, patch_num)
            train_vectors.append(res50.predict(patches))
        train_vectors = np.concatenate(train_vectors)
        np.save(train_vector_name, train_vectors)
    else:
        print('Load vectors from set 1...')
        train_vectors = np.load(train_vector_name)

    if not os.path.exists(valid_vector_name):
        print('Extract vectors from set 2...')
        for file in tqdm(set_2):
            patches = sparse_patchify(file, patch_size, patch_num)
            valid_vectors.append(res50.predict(patches))
        valid_vectors = np.concatenate(valid_vectors)
        np.save(valid_vector_name, valid_vectors)
    else:
        print('Load vectors from set 2...')
        valid_vectors = np.load(valid_vector_name)

    dist_list = []
    tile_id_list = []
    dist = np.zeros(train_vectors.shape[0])
    for cnt, val_vec in enumerate(valid_vectors):
        dist += np.sum(np.square(train_vectors - val_vec), axis=1)
        # BUGFIX: close a tile's group after its patch_num-th vector.  The
        # original tested `cnt % patch_num == 0`, which closed the first
        # group after a single vector and silently dropped the last
        # patch_num - 1 validation vectors.
        if (cnt + 1) % patch_num == 0:
            dist_list.append(np.min(dist))
            tile_id_list.append(np.argmin(dist))
            dist = np.zeros(train_vectors.shape[0])

    return dist_list, tile_id_list
# Example #7 (score: 0)
def make_pred_map(model_dirs, ids, p_dir):
    """Fuse confidence maps from several models and save fused label maps.

    NOTE(review): this function reads the module-level ``flags`` object
    (rsr_data_dir, test_data_dir, city_name) — it is not a parameter.

    Args:
        model_dirs: list of per-model confmap directory names.
        ids: indices selecting which entries of ``model_dirs`` to fuse.
        p_dir: parent directory containing 'temp_save/<model_dir>'.

    Returns:
        Path of the directory holding the fused prediction PNGs.  If that
        directory already exists, fusion is assumed done and the path is
        returned immediately.
    """
    model_dirs = [model_dirs[a] for a in ids]
    _, task_dir = sis_utils.get_task_img_folder(local_dir=True)
    task_dir = os.path.join(
        task_dir, 'fuse_{}_{}'.format(len(model_dirs),
                                      '+'.join([str(idx) for idx in ids])))
    if not os.path.exists(task_dir):
        os.makedirs(task_dir)
    else:
        return task_dir

    model_name = [a.split('/')[-1] for a in model_dirs]
    print('Evaluating using {}...'.format('+'.join(model_name)))

    # data prepare step
    Data = rsrClassData(flags.rsr_data_dir)
    (collect_files_test,
     meta_test) = Data.getCollectionByName(flags.test_data_dir)
    for (image_name, label_name) in collect_files_test:
        c_names = flags.city_name.split(',')
        for c_name in c_names:
            if c_name in image_name:
                # raw strings so `\-` / `\.` are regex escapes rather than
                # (invalid) string escape sequences
                city_name = re.findall(r'[a-z\-]*(?=[0-9]+\.)', image_name)[0]
                tile_id = re.findall(r'[0-9]+(?=\.tif)', image_name)[0]

                # sum the two-class confidences across the selected models
                preds = np.zeros((5000, 5000, 2))
                for model_dir in model_dirs:  # renamed: don't shadow builtin dir()
                    conf_dir = os.path.join(p_dir, 'temp_save', model_dir)
                    preds += np.load(
                        os.path.join(conf_dir,
                                     '{}_{}.npy'.format(city_name, tile_id)))
                pred_labels = sis_utils.get_pred_labels(preds)
                # NOTE(review): scipy.misc.imsave was removed in scipy>=1.2;
                # this call requires an old scipy (or a switch to imageio).
                scipy.misc.imsave(
                    os.path.join(task_dir,
                                 '{}_{}.png'.format(city_name, tile_id)),
                    pred_labels)

                print('{}_{}.png saved in {}'.format(city_name, tile_id,
                                                     task_dir))

    return task_dir
# Example #8 (score: 0)
def compute_distance_patch(train_set, patches, patch_size, patch_num,
                           patch_dir):
    """Extract 2048-d ResNet50 features for training tiles and for patches.

    Features are taken from the layer named 'flatten_1' of an ImageNet
    ResNet50 (NOTE(review): that layer name is keras-version dependent —
    confirm it exists in the installed keras).  Both vector sets are
    cached as .npy files in the task folder and reloaded when present.

    Args:
        train_set: iterable of training tile file paths.
        patches: iterable of patch file names.
        patch_size: patch side length.
        patch_num: number of patches extracted per training tile.
        patch_dir: directory containing the patch files.

    Returns:
        (train_vectors, valid_vectors) as numpy arrays.
    """
    _, task_dir = sis_utils.get_task_img_folder()
    res50 = keras.applications.resnet50.ResNet50(include_top=True,
                                                 weights='imagenet')
    fc2048 = keras.models.Model(inputs=res50.input,
                                outputs=res50.get_layer('flatten_1').output)
    train_vectors = []
    valid_vectors = []
    train_vector_name = os.path.join(
        task_dir,
        'train_l{}_ps{}_pn{}_2048.npy'.format(len(train_set), patch_size,
                                              patch_num))
    # BUGFIX: the original cache name had no {} placeholders, so the
    # .format() arguments were silently ignored and every configuration
    # collided on the same 'patch_vector_2048.npy' file.
    valid_vector_name = os.path.join(
        task_dir,
        'patch_vector_l{}_ps{}_pn{}_2048.npy'.format(len(patches), patch_size,
                                                     patch_num))

    if not os.path.exists(train_vector_name):
        print('Extract vectors from set 1...')
        for file in tqdm(train_set):
            patches_val = sparse_patchify(file, patch_size, patch_num)
            train_vectors.append(fc2048.predict(patches_val))
        train_vectors = np.concatenate(train_vectors)
        np.save(train_vector_name, train_vectors)
    else:
        print('Load vectors from set 1...')
        train_vectors = np.load(train_vector_name)

    if not os.path.exists(valid_vector_name):
        print('Extract vectors from set 2...')
        for file in tqdm(patches):
            patches_val = read_patch_file(file, patch_size, patch_dir)
            valid_vectors.append(fc2048.predict(patches_val))
        valid_vectors = np.concatenate(valid_vectors)
        np.save(valid_vector_name, valid_vectors)
    else:
        print('Load vectors from set 2...')
        valid_vectors = np.load(valid_vector_name)

    return train_vectors, valid_vectors
# Example #9 (score: 0)
                img = Image.new('L', tile_size, 0)
                ImageDraw.Draw(img).polygon(poly.ravel().tolist(),
                                            outline=1,
                                            fill=None)
                polygon_im += np.array(img, dtype=bool)

        for pixel_list in obj_struct['pixelList']:
            polygon_im[pixel_list[:, 0], pixel_list[:, 1]] = 2

        return polygon_im


if __name__ == '__main__':
    start_time = time.time()

    img_dir, task_dir = sis_utils.get_task_img_folder()

    # confidence-map directories for the three AEMO cross-validation folds
    model_dir = [
        'confmap_uab_UnetCrop_aemo_comb_xfold0_1_PS(572, 572)_BS5_EP80_LR0.001_DS30_DR0.1_SFN32',
        'confmap_uab_UnetCrop_aemo_comb_xfold1_1_PS(572, 572)_BS5_EP80_LR0.001_DS30_DR0.1_SFN32',
        'confmap_uab_UnetCrop_aemo_comb_xfold2_1_PS(572, 572)_BS5_EP80_LR0.001_DS30_DR0.1_SFN32',
    ]
    model_name = ['Fold 0', 'Fold 1', 'Fold 2']

    # accumulators for ground-truth / confidence values across folds
    # (never filled in the visible code — see note below)
    true_agg = []
    conf_agg = []

    for md, mn in zip(model_dir, model_name):
        print('Scoring {}...'.format(mn))

        # NOTE(review): the loop body appears truncated here — gt_dir is
        # assigned but never used in the visible code; the scoring logic
        # that presumably follows is missing from this snippet.
        gt_dir = r'/home/lab/Documents/bohao/data/aemo/aemo_union'
def main(flags):
    """Train a crop-style U-Net from scratch on resized patches.

    Builds the TF graph, computes RGB channel means for the collection,
    resizes the raw tiles into fixed-size patches cached under the task
    folder, splits the file list into train/validation, and runs training.
    """
    # deterministic runs keyed on the run id
    np.random.seed(int(flags.run_id))
    tf.set_random_seed(int(flags.run_id))

    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')  # training/inference switch
    model = myUnetModelCrop({
        'X': X,
        'Y': y
    },
                            trainable=mode,
                            model_name=flags.model_name,
                            input_size=flags.input_size,
                            batch_size=flags.batch_size,
                            learn_rate=flags.learning_rate,
                            decay_step=flags.decay_step,
                            decay_rate=flags.decay_rate,
                            epochs=flags.epochs,
                            start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection(flags.ds_name)
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([0, 1, 2])  # get mean of rgb info
    print(img_mean)

    # resize the raw tiles into input-size patches cached under task_dir
    img_dir, task_dir = sis_utils.get_task_img_folder()
    save_dir = os.path.join(task_dir, 'bihar_patches')
    ersa_utils.make_dir_if_not_exist(save_dir)
    files, par_dir = blCol.getAllTileByDirAndExt([0, 1, 2, 3])
    resize_patches(files, par_dir, flags.input_size, save_dir)

    patchDir = save_dir

    # make data reader
    # use first 5 tiles for validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'tile')
    # use first city for validation
    #assert len(file_list) == flags.n_train + flags.n_valid
    # fixed split: first 45 files train, last 5 validate
    file_list_train = [a for a in file_list[:45]]
    file_list_valid = [a for a in file_list[-5:]]

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader(
            [3], [0, 1, 2],
            patchDir,
            file_list_train,
            flags.input_size,
            None,
            flags.batch_size,
            dataAug='flip,rotate',
            block_mean=np.append([0], img_mean))
        # no augmentation needed for validation
        dataReader_valid = uabDataReader.ImageLabelReader([3], [0, 1, 2],
                                                          patchDir,
                                                          file_list_valid,
                                                          flags.input_size,
                                                          None,
                                                          flags.batch_size,
                                                          dataAug=' ',
                                                          block_mean=np.append(
                                                              [0], img_mean))

    # train
    start_time = time.time()

    model.train_config('X',
                       'Y',
                       flags.n_train,
                       flags.n_valid,
                       flags.input_size,
                       uabRepoPaths.modelPath,
                       loss_type='xent',
                       par_dir='{}'.format(flags.ds_name))
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=
        None,  # train from scratch, no need to load pre-trained model
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a message every 100 step(sample)
        save_epoch=200,  # save the model every 200 epochs
        gpu=GPU,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))
def main(flags):
    """Fine-tune a U-Net on hard samples, optionally freezing lower layers.

    When ``flags.start_layer`` < 10, only the decoder layers
    ``layerup<start_layer>`` .. ``layerup9`` are trained and the model
    name is suffixed with ``_up<start_layer>``; otherwise the whole
    network is trained.  Training resumes from the checkpoint in
    ``flags.model_dir``.
    """
    # deterministic runs keyed on the run id
    np.random.seed(int(flags.run_id))
    tf.set_random_seed(int(flags.run_id))

    # tag the model name with the first trainable up-sampling layer
    if flags.start_layer >= 10:
        pass
    else:
        flags.model_name += '_up{}'.format(flags.start_layer)

    # make network
    # define place holder
    X = tf.placeholder(
        tf.float32,
        shape=[None, flags.input_size[0], flags.input_size[1], 3],
        name='X')
    y = tf.placeholder(
        tf.int32,
        shape=[None, flags.input_size[0], flags.input_size[1], 1],
        name='y')
    mode = tf.placeholder(tf.bool, name='mode')  # training/inference switch
    model = uabMakeNetwork_UNet.UnetModelCrop({
        'X': X,
        'Y': y
    },
                                              trainable=mode,
                                              model_name=flags.model_name,
                                              input_size=flags.input_size,
                                              batch_size=flags.batch_size,
                                              learn_rate=flags.learning_rate,
                                              decay_step=flags.decay_step,
                                              decay_rate=flags.decay_rate,
                                              epochs=flags.epochs,
                                              start_filter_num=flags.sfn)
    model.create_graph('X', class_num=flags.num_classes)

    # create collection
    # the original file is in /ei-edl01/data/uab_datasets/inria
    blCol = uab_collectionFunctions.uabCollection(flags.ds_name)
    blCol.readMetadata()
    img_mean = blCol.getChannelMeans([1, 2, 3])  # get mean of rgb info

    # extract patches
    extrObj = uab_DataHandlerFunctions.uabPatchExtr(
        [0, 1, 2, 3],
        cSize=flags.input_size,
        numPixOverlap=int(model.get_overlap()),
        extSave=['png', 'jpg', 'jpg', 'jpg'],
        isTrain=True,
        gtInd=0,
        pad=int(model.get_overlap() // 2))
    patchDir = extrObj.run(blCol)

    # make data reader
    # use first 5 tiles for validation
    idx, file_list = uabCrossValMaker.uabUtilGetFolds(patchDir, 'fileList.txt',
                                                      'tile')
    file_list_valid = uabCrossValMaker.make_file_list_by_key(
        idx, file_list, [4, 5])

    # training uses the curated hard-sample file list saved under img_dir
    img_dir, task_dir = sis_utils.get_task_img_folder()
    save_dir = os.path.join(img_dir, 'hard_samples')
    file_list_train = ersa_utils.load_file(
        os.path.join(save_dir, 'file_list.txt'))
    file_list_train = [l.strip().split(' ') for l in file_list_train]

    with tf.name_scope('image_loader'):
        # GT has no mean to subtract, append a 0 for block mean
        dataReader_train = uabDataReader.ImageLabelReader(
            [3], [0, 1, 2],
            save_dir,
            file_list_train,
            flags.input_size,
            flags.tile_size,
            flags.batch_size,
            dataAug='flip,rotate',
            block_mean=np.append([0], img_mean))

        # no augmentation needed for validation
        # NOTE(review): the channel index order here ([0] gt, [1, 2, 3] rgb)
        # differs from the training reader above — confirm it is intended.
        dataReader_valid = uabDataReader.ImageLabelReader([0], [1, 2, 3],
                                                          patchDir,
                                                          file_list_valid,
                                                          flags.input_size,
                                                          flags.tile_size,
                                                          flags.batch_size,
                                                          dataAug=' ',
                                                          block_mean=np.append(
                                                              [0], img_mean))

    # train
    start_time = time.time()

    if flags.start_layer >= 10:
        # train every layer
        model.train_config('X',
                           'Y',
                           flags.n_train,
                           flags.n_valid,
                           flags.input_size,
                           uabRepoPaths.modelPath,
                           loss_type='xent',
                           par_dir='aemo/{}'.format(flags.ds_name))
    else:
        # only update the selected up-sampling layers
        model.train_config('X',
                           'Y',
                           flags.n_train,
                           flags.n_valid,
                           flags.input_size,
                           uabRepoPaths.modelPath,
                           loss_type='xent',
                           par_dir='aemo/{}'.format(flags.ds_name),
                           train_var_filter=[
                               'layerup{}'.format(i)
                               for i in range(flags.start_layer, 10)
                           ])
    model.run(
        train_reader=dataReader_train,
        valid_reader=dataReader_valid,
        pretrained_model_dir=flags.
        model_dir,  # fine-tune from this pre-trained checkpoint
        isTrain=True,
        img_mean=img_mean,
        verb_step=100,  # print a message every 100 step(sample)
        save_epoch=5,  # save the model every 5 epochs
        gpu=GPU,
        tile_size=flags.tile_size,
        patch_size=flags.input_size)

    duration = time.time() - start_time
    print('duration {:.2f} hours'.format(duration / 60 / 60))