def fold_evaluate_loop(valid_data_split, fold_id):
    """Evaluate one CV fold: run the trained network plus mask postprocessing
    on the validation split and score the binarized predictions.

    Args:
        valid_data_split: validation split with at least ``ID_COLUMN`` and
            ``Y_COLUMN`` columns (presumably a pandas DataFrame — it exposes
            ``[col].tolist()`` / ``[col].values``; TODO confirm with caller).
        fold_id: fold index, used only to suffix the pipeline names so each
            fold's cached steps stay separate.

    Returns:
        Tuple ``(iou, iout, (valid_ids, predicted_masks_valid))`` where the
        last element pairs validation ids with the resized predicted masks.
    """
    valid_pipe_input = {
        'input': {'meta': valid_data_split},
        'callback_input': {'meta_valid': None},
    }
    valid_ids = valid_data_split[ID_COLUMN].tolist()

    LOGGER.info('Start pipeline transform on valid')
    pipeline_network = unet(config=CONFIG,
                            suffix='_fold_{}'.format(fold_id),
                            train_mode=False)
    pipeline_postprocessing = pipelines.mask_postprocessing(
        config=CONFIG, suffix='_fold_{}'.format(fold_id))
    pipeline_network.clean_cache()
    pipeline_postprocessing.clean_cache()

    predicted_masks_valid = pipeline_network.transform(valid_pipe_input)
    valid_pipe_masks = {'input_masks': predicted_masks_valid}
    output_valid = pipeline_postprocessing.transform(valid_pipe_masks)
    utils.clean_object_from_memory(pipeline_network)
    # BUGFIX: the postprocessing pipeline was previously never released,
    # unlike every other pipeline object in this file — free it as well so
    # memory does not accumulate across folds.
    utils.clean_object_from_memory(pipeline_postprocessing)

    y_pred_valid = output_valid['binarized_images']
    y_true_valid = utils.read_masks(valid_data_split[Y_COLUMN].values)

    iou, iout = calculate_scores(y_true_valid, y_pred_valid)
    # Keep only the resized raw masks for the caller (used downstream for
    # out-of-fold prediction aggregation).
    predicted_masks_valid = predicted_masks_valid['resized_images']
    return iou, iout, (valid_ids, predicted_masks_valid)
def fold_fit_evaluate_loop(train_data_split, valid_data_split, fold_id):
    """Fit the network on one fold's training split, then score it on the
    fold's validation split.

    The network pipeline is built twice with the same fold-suffixed config:
    once in train mode (fit + transform on train data), once in inference
    mode (transform on validation data). Each pipeline object is explicitly
    released after use.

    Args:
        train_data_split: training split (passed as pipeline 'meta' input).
        valid_data_split: validation split with ``ID_COLUMN`` / ``Y_COLUMN``.
        fold_id: fold index used to suffix pipeline names and the config.

    Returns:
        Tuple ``(auc, (valid_ids, y_pred_valid))`` — the validation score and
        the ids paired with resized predictions.
    """
    fold_suffix = '_fold_{}'.format(fold_id)
    fit_input = {
        'input': {'meta': train_data_split},
        'callback_input': {'meta_valid': valid_data_split},
    }
    eval_input = {
        'input': {'meta': valid_data_split},
        'callback_input': {'meta_valid': None},
    }
    valid_ids = valid_data_split[ID_COLUMN].tolist()

    LOGGER.info('Start pipeline fit and transform on train')
    config = add_fold_id_suffix(CONFIG, fold_id)
    trainer = network(config=config, suffix=fold_suffix, train_mode=True)
    trainer.clean_cache()
    trainer.fit_transform(fit_input)
    utils.clean_object_from_memory(trainer)

    LOGGER.info('Start pipeline transform on valid')
    scorer = network(config=config, suffix=fold_suffix, train_mode=False)
    scorer.clean_cache()
    predictions = scorer.transform(eval_input)
    utils.clean_object_from_memory(scorer)

    y_pred_valid = predictions['resized_images']
    y_true_valid = valid_data_split[Y_COLUMN].values
    auc = calculate_scores(y_true_valid, y_pred_valid)
    return auc, (valid_ids, y_pred_valid)
def fold_evaluate_predict_loop(valid_data_split, test, fold_id):
    """Evaluate one fold on its validation split, then predict on the test
    set with the same fold's trained network.

    Args:
        valid_data_split: validation split forwarded to ``fold_evaluate_loop``.
        test: test-set metadata (passed as pipeline 'meta' input).
        fold_id: fold index used to suffix the pipeline name.

    Returns:
        Tuple ``(iou, iout, predicted_masks_valid, predicted_masks_test)``.
    """
    iou, iout, predicted_masks_valid = fold_evaluate_loop(valid_data_split,
                                                          fold_id)

    test_pipe_input = {
        'input': {'meta': test},
        'callback_input': {'meta_valid': None},
    }
    # NOTE(review): this builds the pipeline from plain CONFIG, while
    # fold_fit_evaluate_loop uses add_fold_id_suffix(CONFIG, fold_id); also
    # fold_evaluate_loop builds `unet(...)` where this uses `network(...)`.
    # Confirm these asymmetries are intentional.
    predictor = network(config=CONFIG,
                        suffix='_fold_{}'.format(fold_id),
                        train_mode=False)
    LOGGER.info('Start pipeline transform on test')
    predictor.clean_cache()
    raw_test_output = predictor.transform(test_pipe_input)
    utils.clean_object_from_memory(predictor)

    predicted_masks_test = raw_test_output['resized_images']
    return iou, iout, predicted_masks_valid, predicted_masks_test