def _generate_prediction(meta_data, pipeline, logger, category_ids, target_size=(300, 300)):
    """Run ``pipeline`` over all of ``meta_data`` in one pass and build annotations.

    Args:
        meta_data: metadata describing the images to predict on
            (presumably a pandas DataFrame — ``len()`` is used for sizing; verify against caller).
        pipeline: steps pipeline exposing ``clean_cache()`` and ``transform(data)``
            whose output dict contains ``'y_pred'``.
        logger: logger passed through to ``create_annotations``.
        category_ids: category ids passed through to ``create_annotations``.
        target_size: ``(height, width)`` every prediction is resized to.
            Defaults to ``(300, 300)``, preserving the original hard-coded behavior.

    Returns:
        The annotations produced by ``create_annotations`` for all images.
    """
    data = {'input': {'meta': meta_data,
                      'target_sizes': [target_size] * len(meta_data),
                      },
            # NOTE(review): train_mode=True during prediction looks suspicious — confirm
            # this is what the pipeline's specs contract expects here.
            'specs': {'train_mode': True},
            'callback_input': {'meta_valid': None}
            }
    # Clear cached intermediate results before and after the transform so
    # successive calls never reuse stale transformer outputs.
    pipeline.clean_cache()
    output = pipeline.transform(data)
    pipeline.clean_cache()
    y_pred = output['y_pred']
    prediction = create_annotations(meta_data, y_pred, logger, category_ids)
    return prediction
def _generate_prediction_in_chunks(meta_data, pipeline, logger, category_ids, chunk_size,
                                   target_size=(300, 300)):
    """Run ``pipeline`` over ``meta_data`` in chunks and concatenate the annotations.

    Memory-friendly variant of ``_generate_prediction``: the metadata is split
    with ``generate_data_frame_chunks`` and each chunk is transformed separately.

    Args:
        meta_data: metadata describing the images to predict on
            (presumably a pandas DataFrame — ``len()`` is used for sizing; verify against caller).
        pipeline: steps pipeline exposing ``clean_cache()`` and ``transform(data)``
            whose output dict contains ``'y_pred'``.
        logger: logger passed through to ``create_annotations``.
        category_ids: category ids passed through to ``create_annotations``.
        chunk_size: number of rows per chunk handed to ``generate_data_frame_chunks``.
        target_size: ``(height, width)`` every prediction is resized to.
            Defaults to ``(300, 300)``, preserving the original hard-coded behavior.

    Returns:
        list: annotations from all chunks, in chunk order.
    """
    prediction = []
    for meta_chunk in generate_data_frame_chunks(meta_data, chunk_size):
        data = {'input': {'meta': meta_chunk,
                          'target_sizes': [target_size] * len(meta_chunk)
                          },
                # NOTE(review): train_mode=True during prediction looks suspicious — confirm.
                'specs': {'train_mode': True},
                'callback_input': {'meta_valid': None}
                }
        # Clear cached intermediate results around each chunk so chunks never
        # see each other's transformer outputs.
        pipeline.clean_cache()
        output = pipeline.transform(data)
        pipeline.clean_cache()
        y_pred = output['y_pred']
        prediction_chunk = create_annotations(meta_chunk, y_pred, logger, category_ids)
        prediction.extend(prediction_chunk)
    return prediction
def _generate_prediction(self, cache_dirpath, outputs):
    """Push precomputed unet ``outputs`` through the validation pipeline and build annotations.

    Empty marker files are created for every pipeline step under
    ``<cache_dirpath>/transformers`` so the pipeline treats its transformers
    as already fitted and does not attempt to refit them.

    Args:
        cache_dirpath: directory the validation pipeline uses for its caches.
        outputs: dict of precomputed network outputs, merged into the
            ``'unet_output'`` section of the pipeline input.

    Returns:
        The annotations produced by ``create_annotations`` for ``self.meta_valid``.
    """
    from pathlib import Path  # local import: only this method needs it

    data = {'callback_input': {'meta': self.meta_valid,
                               'meta_valid': None,
                               'target_sizes': [(300, 300)] * len(self.meta_valid),
                               },
            'unet_output': {**outputs}
            }
    pipeline = self.validation_pipeline(cache_dirpath)
    # Previously this shelled out with subprocess.call('touch ...', shell=True),
    # which is injection-prone for odd path/step names, non-portable, and
    # silently ignored failures. Path.touch() is equivalent and safe; ensure
    # the parent directory exists so touch cannot fail on a fresh cache dir.
    transformers_dir = Path(cache_dirpath) / 'transformers'
    transformers_dir.mkdir(parents=True, exist_ok=True)
    for step_name in pipeline.all_steps:
        (transformers_dir / step_name).touch()
    output = pipeline.transform(data)
    y_pred = output['y_pred']
    prediction = create_annotations(self.meta_valid, y_pred, logger, CATEGORY_IDS)
    return prediction