def __init__(self, up_size, path=None, ds_name=None, name='up_sampling'):
    """
    Up-sampling preprocess step: registers a process that resizes images to a fixed size.

    :param up_size: target (height, width) of the up-sampled images
    :param path: directory to save the processed images; derived from ds_name when None
    :param ds_name: dataset name used to build the save location if path is None
    :param name: base name of the process
    """
    self.up_size = up_size
    proc_name = '{}_h{}_w{}'.format(name, up_size[0], up_size[1])
    if path is None:
        # Without an explicit path we need a dataset name to derive one.
        assert ds_name is not None
        path = ersa_utils.get_block_dir('data', ['preprocess', ds_name, name])
    super().__init__(proc_name, path, func=self.process)
def __init__(self, gamma, path=None, ds_name=None, name='gamma_adjust'):
    """
    Gamma-adjustment preprocess step: registers a process that changes image brightness.

    :param gamma: gamma to adjust the images' brightness
    :param path: path to save the adjusted images; derived from ds_name when None
    :param ds_name: dataset name used to build the save location if path is None
    :param name: base name of the process
    """
    self.gamma = gamma
    if path is None:
        # Without an explicit path we need a dataset name to derive one.
        assert ds_name is not None
        path = ersa_utils.get_block_dir('data', ['preprocess', ds_name, name])
    block_name = '{}_gamma{}'.format(name, self.gamma)
    super().__init__(block_name, path, func=self.process)
def read_collection(clc_name=None, clc_dir=None, raw_data_path=None, field_name=None, field_id=None,
                    rgb_ext=None, gt_ext=None, file_ext=None, files=None, force_run=False):
    """
    Read and initialize a collection from a directory; try to create one if it does not exist.

    :param clc_name: name of the collection (used to derive clc_dir when clc_dir is None)
    :param clc_dir: directory of the collection
    :param raw_data_path: path to where the data are stored
    :param field_name: could be the name of the cities, or another prefix of the images
    :param field_id: could be the id of the tiles, or another suffix of the images
    :param rgb_ext: name extensions that indicate the images are not ground truth; use ',' to
                    separate multiple extensions
    :param gt_ext: name extension that indicates the images are ground truth; at most one
                   ground-truth extension is allowed
    :param file_ext: extensions of the files, ',' separated; if all files share one extension,
                     specifying it once is enough
    :param files: files in raw_data_path; can be given by the user to exclude some raw files,
                  None means all files are discovered automatically
    :param force_run: force-run the collection maker even if it already exists
    :return: the collection object, assertion error if no process has completed
    """
    if clc_dir is None:
        assert clc_name is not None
        clc_dir = ersa_utils.get_block_dir('data', ['collection', clc_name])

    # Guard clause: when no finished collection exists here, attempt to build one from scratch.
    if not processBlock.BasicProcess('collection_maker', clc_dir).check_finish():
        return CollectionMaker(raw_data_path, field_name, field_id, rgb_ext, gt_ext, file_ext,
                               files, clc_name, force_run)

    # A finished marker exists: rebuild the collection object from its stored metadata.
    meta_data = ersa_utils.load_file(os.path.join(clc_dir, 'meta.pkl'))
    return CollectionMaker(meta_data['raw_data_path'], meta_data['field_name'], meta_data['field_id'],
                           meta_data['rgb_ext'], meta_data['gt_ext'], meta_data['file_ext'],
                           meta_data['files'], meta_data['clc_name'], force_run=force_run)
def __init__(self, path, switch_dict, field_ext_pair, name):
    """
    Preprocess step that applies a value-switching operation to one field of a collection.

    :param path: directory of the collection
    :param switch_dict: dictionary stored for use by self.process; presumably maps old field
                        values to their replacements — TODO confirm against process()
    :param field_ext_pair: a list where the first element is the field extension to be operated
                           on and the second field extension is the name of the new field
    :param name: name of the process
    """
    # NOTE(review): the original docstring documented a nonexistent `mult_factor` parameter
    # (copy-pasted from the channel-multiply process); corrected here. Code is unchanged.
    func = self.process
    self.switch_dict = switch_dict
    self.field_ext_pair = field_ext_pair
    self.clc = collectionMaker.read_collection(clc_dir=path)
    # Rebind path to the preprocess output directory named after the collection.
    path = ersa_utils.get_block_dir('data', ['preprocess', os.path.basename(path), name])
    self.files = []
    super().__init__(name, path, func)
def __init__(self, path, mult_factor, field_ext_pair):
    """
    Preprocess step that multiplies a channel/field of the collection by a constant.

    :param path: directory of the collection
    :param mult_factor: constant number to multiply by
    :param field_ext_pair: a list where the first element is the field extension to be operated
                           on and the second field extension is the name of the new field
    """
    # Build a filesystem-safe process name: decimals use 'p' for '.', fractions use 'd' for '/'.
    if mult_factor >= 1:
        name = 'chan_mult_{:.5f}'.format(mult_factor).replace('.', 'p')
    else:
        frac_repr = str(Fraction(mult_factor).limit_denominator()).replace('/', 'd')
        name = 'chan_mult_{}'.format(frac_repr)
    self.mult_factor = mult_factor
    self.field_ext_pair = field_ext_pair
    self.clc = collectionMaker.read_collection(clc_dir=path)
    block_dir = ersa_utils.get_block_dir('data', ['preprocess', os.path.basename(path), name])
    self.files = []
    super().__init__(name, block_dir, self.process)
def __init__(self, patch_size, tile_size=None, ds_name='', overlap=0, pad=0, name='patch_extractor'):
    """
    Process that extracts fixed-size patches from (optionally padded, overlapping) tiles.

    :param patch_size: patch size to be extracted
    :param tile_size: tile size (image size); left as None when unknown
    :param ds_name: name of the dataset, used to name the folder
    :param overlap: number of overlapping pixels
    :param pad: number of pixels to pad around the image
    :param name: name of the process
    """
    # NOTE(review): fixed docstring typos ("#pxiels", "iamge") from the original.
    self.patch_size = np.array(patch_size, dtype=np.int32)
    # Keep None as-is so callers can detect an unspecified tile size.
    self.tile_size = None if tile_size is None else np.array(tile_size, dtype=np.int32)
    self.overlap = overlap
    self.pad = pad
    pe_name = '{}_h{}w{}_overlap{}_pad{}'.format(name, self.patch_size[0], self.patch_size[1],
                                                 self.overlap, self.pad)
    path = ersa_utils.get_block_dir('data', [name, ds_name, pe_name])
    super().__init__(pe_name, path, func=self.process)
def get_dir(self):
    """
    Get or create the directory of this collection.

    :return: directory of the collection
    """
    sub_dirs = ['collection', self.clc_name]
    return ersa_utils.get_block_dir('data', sub_dirs)
def __init__(self, model, file_list, model_dir, init_op, reader_op, ds_name='default',
             save_result_parent_dir=None, name='nn_estimator_segment_scene', gpu=None, verb=True,
             load_epoch_num=None, best_model=False, score_result=False, split_char='_',
             post_func=None, save_func=None, ignore_label=(), **kwargs):
    """
    Scene-segmentation evaluation process for a trained network.

    :param model: model to be evaluated
    :param file_list: evaluation file list
    :param model_dir: path to the pretrained model
    :param init_op: initialize operation for the dataset reader
    :param reader_op: reader op; returns feature and label when scoring the results
    :param ds_name: name of the dataset
    :param save_result_parent_dir: parent directory where the result will be stored
    :param name: name of the process
    :param gpu: which gpu to run the model on; defaults to all available gpus
    :param verb: if True, print out messages while evaluating
    :param load_epoch_num: which epoch's ckpt to load
    :param best_model: if True, load the model with the best performance on the validation set
    :param score_result: if False, no gt is used to score results
    :param split_char: character used to split the file name
    :param post_func: post-processing function to make the prediction
    :param save_func: post-processing function to make a visible prediction map
    :param ignore_label: labels to be ignored at scoring
    :param kwargs: other parameters
    """
    self.model = model
    self.file_list = file_list
    self.model_dir = model_dir
    self.init_op = init_op
    self.reader_op = reader_op
    # The last component of the model directory path names the model.
    self.model_name = model_dir.split('/')[-1]
    if save_result_parent_dir is None:
        eval_levels = [self.model_name, ds_name]
    else:
        eval_levels = [save_result_parent_dir, self.model_name, ds_name]
    self.score_save_dir = ersa_utils.get_block_dir('eval', eval_levels)
    self.gpu = gpu
    self.verb = verb
    self.load_epoch_num = load_epoch_num
    self.best_model = best_model
    self.score_results = score_result
    self.split_char = split_char
    self.post_func = post_func
    self.save_func = save_func
    self.ignore_label = ignore_label
    self.kwargs = kwargs
    super().__init__(name, self.score_save_dir, func=self.process)
def __init__(self, model, file_list, input_size, tile_size, batch_size, img_mean, model_dir,
             ds_name='default', save_result_parent_dir=None, name='nn_estimator_segment', gpu=None,
             verb=True, load_epoch_num=None, best_model=False, truth_val=1, score_results=False,
             split_char='_', **kwargs):
    """
    Segmentation evaluation process for a trained network.

    :param model: model to be evaluated
    :param file_list: evaluation file list
    :param input_size: dimension of the input to the network
    :param tile_size: dimension of a single evaluation file
    :param batch_size: batch size
    :param img_mean: mean of each channel
    :param model_dir: path to the pretrained model
    :param ds_name: name of the dataset
    :param save_result_parent_dir: parent directory where the result will be stored
    :param name: name of the process
    :param gpu: which gpu to run the model on; defaults to all available gpus
    :param verb: if True, print out messages while evaluating
    :param load_epoch_num: which epoch's ckpt to load
    :param best_model: if True, load the model with the best performance on the validation set
    :param truth_val: value of an H1 pixel in the gt
    :param score_results: if False, no gt is used to score results
    :param split_char: character used to split the file name
    :param kwargs: other parameters
    """
    self.model = model
    self.file_list = file_list
    self.input_size = input_size
    self.tile_size = tile_size
    self.batch_size = batch_size
    self.img_mean = img_mean
    self.model_dir = model_dir
    # The last component of the model directory path names the model.
    self.model_name = model_dir.split('/')[-1]
    if save_result_parent_dir is None:
        eval_levels = [self.model_name, ds_name]
    else:
        eval_levels = [save_result_parent_dir, self.model_name, ds_name]
    self.score_save_dir = ersa_utils.get_block_dir('eval', eval_levels)
    self.gpu = gpu
    self.verb = verb
    self.load_epoch_num = load_epoch_num
    self.best_model = best_model
    self.truth_val = truth_val
    self.score_results = score_results
    self.split_char = split_char
    self.kwargs = kwargs
    self.compute_shape_flag = False  # recompute tile shape or not
    super().__init__(name, self.score_save_dir, func=self.process)