Example #1
0
def main():
    """Entry point: parse CLI arguments, set up a logger, and run cross-validation."""
    train_args = parse_train_args()

    # Logger writes to train_args.save_dir; quiet mode suppresses console verbosity.
    train_logger = create_logger(
        name='train', save_dir=train_args.save_dir, quiet=train_args.quiet)

    cross_validate(train_args, train_logger)
Example #2
0
            f'Seed {init_seed + fold_num} ==> test {args.metric} = {np.nanmean(scores):.6f}'
        )

        # Optionally log each task's score for this fold.
        # NOTE(review): assumes task_names and scores are aligned element-wise — confirm upstream.
        if args.show_individual_scores:
            for task_name, score in zip(task_names, scores):
                info(
                    f'Seed {init_seed + fold_num} ==> test {task_name} {args.metric} = {score:.6f}'
                )

    # Report scores across models
    # NOTE(review): all_scores is presumably shaped (num_folds, num_tasks) — TODO confirm with caller.
    avg_scores = np.nanmean(
        all_scores, axis=1)  # average score for each model across tasks
    # Mean/std across folds; nan-aware so missing scores don't poison the summary.
    mean_score, std_score = np.nanmean(avg_scores), np.nanstd(avg_scores)
    info(f'Overall test {args.metric} = {mean_score:.6f} +/- {std_score:.6f}')

    # Per-task summary aggregated over all folds.
    if args.show_individual_scores:
        for task_num, task_name in enumerate(task_names):
            info(
                f'Overall test {task_name} {args.metric} = '
                f'{np.nanmean(all_scores[:, task_num]):.6f} +/- {np.nanstd(all_scores[:, task_num]):.6f}'
            )

    # Return the overall summary statistics to the caller.
    return mean_score, std_score


if __name__ == '__main__':
    # Script entry point: parse CLI arguments, build the training logger,
    # then run cross-validation.
    cli_args = parse_train_args()
    train_logger = create_logger(
        name='train', save_dir=cli_args.save_dir, quiet=cli_args.quiet)
    cross_validate(cli_args, train_logger)