Code example #1
# Record the (height, width) of the first training label image; the pipeline
# presumably assumes all label images share this resolution — TODO confirm.
image_shape_original_label = utils.load_image(files_train_label[0]).shape[:2]
print(f"Found {len(files_train_label)} training samples")

# Collect validation files and keep only max_samples_validation random samples.
files_valid_label = utils.get_files_in_folder(conf.label_validation)
files_valid_input = [
    utils.get_files_in_folder(input_folder)
    for input_folder in conf.input_validation
]
# Draw the random subset once from the labels, then apply the same indices to
# every input modality so inputs and labels stay aligned.
_, idcs = utils.sample_list(files_valid_label,
                            n_samples=conf.max_samples_validation)
files_valid_input = [np.take(file_list, idcs) for file_list in files_valid_input]
files_valid_label = np.take(files_valid_label, idcs)
print(f"Found {len(files_valid_label)} validation samples")

# Parse the one-hot conversion XML files into palette structures, replacing
# the path strings stored on conf with the parsed palettes.
for palette_attr in ("one_hot_palette_input", "one_hot_palette_label"):
    setattr(conf, palette_attr,
            utils.parse_convert_xml(getattr(conf, palette_attr)))
# One class per palette entry.
n_classes_input = len(conf.one_hot_palette_input)
n_classes_label = len(conf.one_hot_palette_label)


# build dataset pipeline parsing functions
def parse_sample(input_files, label_file):
    # parse and process input images
    inputs = []
    for inp in input_files:
        inp = utils.load_image_op(inp)
        inp = utils.resize_image_op(
            inp,
            image_shape_original_input,
Code example #2
import numpy as np
import utils
# Derive per-class loss weights from the class distribution of the validation
# BEV data: weight_c = log(1 / frequency_c), so rare classes get larger weights.
color_palette = utils.parse_convert_xml("one_hot_conversion/convert_new.xml")
class_distribution = utils.get_class_distribution(
    "../data/our_data/val/bev+occlusion", (256, 512), color_palette)
class_frequencies = list(class_distribution.values())
weights = np.log(np.reciprocal(class_frequencies))
print(weights)