# --- Best-model selection (formatting reconstructed from a collapsed line).
# NOTE(review): this first block presumably sits inside the per-epoch
# training loop, whose header is outside this chunk — confirm the original
# indentation against the full file.
if cfg.save_best:
    # Combined validation score: class-wise averaged event-based F-measure,
    # plus the mean weak-label metric when weak data is in use.
    global_valid = valid_metric.results_class_wise_average_metrics( )['f_measure']['f_measure']
    if not no_weak:
        global_valid += np.mean(weak_metric)
    # save_best_cb.apply returns truthy when the score improves on the best
    # seen so far; only then is the checkpoint persisted.
    if save_best_cb.apply(global_valid):
        model_fname = os.path.join(saved_model_dir, "baseline_best")
        torch.save(state, model_fname)

# --- After training: choose which checkpoint to evaluate.
if cfg.save_best:
    # Reload the best-scoring checkpoint saved above.
    model_fname = os.path.join(saved_model_dir, "baseline_best")
    state = torch.load(model_fname)
    LOG.info("testing model: {}".format(model_fname))
else:
    # Otherwise `state` still holds the weights from the last epoch.
    LOG.info("testing model of last epoch: {}".format(cfg.n_epoch))

# ##############
# Validation
# ##############
# NOTE(review): "predicitons" is a typo in the local variable name; kept
# as-is here since this edit only adds documentation.
predicitons_fname = os.path.join(saved_pred_dir, "baseline_validation.tsv")
test_model(state, cfg.validation, reduced_number_of_data, predicitons_fname)

# ##############
# Evaluation
# ##############
predicitons_eval2019_fname = os.path.join(saved_pred_dir, "baseline_eval2019.tsv")
test_model(state, cfg.eval_desed, reduced_number_of_data, predicitons_eval2019_fname)
# NOTE(review): the leading "np.mean(weak_metric)))" is the tail of a
# LOG.info(...) call truncated at this chunk's boundary — left as-is.
np.mean(weak_metric)))

# Snapshot the current model / EMA model / optimizer into the checkpoint dict.
state['model']['state_dict'] = crnn.state_dict()
state['model_ema']['state_dict'] = crnn_ema.state_dict()
state['optimizer']['state_dict'] = optimizer.state_dict()
state['epoch'] = epoch
# state['valid_metric'] = valid_events_metric.results()

# Periodic checkpointing every cfg.checkpoint_epochs epochs, when enabled.
if cfg.checkpoint_epochs is not None and (epoch + 1) % cfg.checkpoint_epochs == 0:
    model_fname = os.path.join(saved_model_dir, '_epoch_' + str(epoch))
    torch.save(state, model_fname)

# Best-model tracking: here the selection score is the mean weak-label
# metric alone (no event-based F-measure term, unlike the other variant
# visible in this file).
if cfg.save_best:
    global_valid = np.mean(weak_metric)
    if save_best_cb.apply(global_valid):
        model_fname = os.path.join(saved_model_dir, '_best')
        torch.save(state, model_fname)

# NOTE(review): the blocks above presumably run inside the per-epoch loop
# and the blocks below run once after training — the original indentation
# was lost when this chunk was collapsed; confirm against the full file.
if cfg.save_best:
    # Reload the best checkpoint for final testing.
    model_fname = os.path.join(saved_model_dir, '_best')
    state = torch.load(model_fname)
    LOG.info("testing model: {}".format(model_fname))
else:
    # Otherwise test with the last epoch's weights already in `state`.
    LOG.info("testing model of last epoch: {}".format(cfg.n_epoch))

# ##############
# Validation
# ##############
predictions_fname = os.path.join(saved_pred_dir, "_validation.csv")
test_model(state, reduced_number_of_data, predictions_fname)