def _load_state_vars(state, gtruth_df, median_win=None):
    """Rebuild the model, dataset and decoding parameters from a saved checkpoint state."""
    pred_df = gtruth_df.copy()
    # Rebuild the encoder, scaler and model stored in the checkpoint state
    many_hot_encoder = ManyHotEncoder.load_state_dict(state["many_hot_encoder"])
    scaler = _load_scaler(state)
    crnn = _load_crnn(state)
    # Note: need to unsqueeze axis 1 for the conv input
    transforms_valid = get_transforms(cfg.max_frames, scaler=scaler, add_axis=1)
    # Note: DataLoadDf is the dataset wrapper; no torch DataLoader is built here
    strong_dataload = DataLoadDf(pred_df, many_hot_encoder.encode_strong_df, transforms_valid,
                                 return_indexes=True)
    pooling_time_ratio = state["pooling_time_ratio"]
    if median_win is None:
        median_win = state["median_window"]
    return {
        "model": crnn,
        "dataload": strong_dataload,
        "pooling_time_ratio": pooling_time_ratio,
        "many_hot_encoder": many_hot_encoder,
        "median_window": median_win,
    }
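
# Illustrative usage of the helper above (a sketch, not part of the original
# script): reload a checkpoint written with torch.save(state, model_fname) and
# rebuild the prediction-time objects. The checkpoint path and the ground-truth
# DataFrame are assumptions supplied by the caller; torch is imported at the top
# of the full script.
def _example_restore_for_prediction(model_fname, gtruth_df, median_win=None):
    state = torch.load(model_fname, map_location="cpu")  # assumes a CPU-loadable checkpoint
    params = _load_state_vars(state, gtruth_df, median_win=median_win)
    # params["model"] is the CRNN ready for inference, params["dataload"] wraps
    # gtruth_df, and params["median_window"] is the post-processing window size.
    return params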
        # End of the epoch: keep the best model so far and log the epoch metrics
        if save_best_cb.apply(valid_synth_f1):
            model_fname = os.path.join(saved_model_dir, "baseline_best")
            torch.save(state, model_fname)
        results.loc[epoch, "global_valid"] = valid_synth_f1
        results.loc[epoch, "loss"] = loss_value.item()
        results.loc[epoch, "valid_synth_f1"] = valid_synth_f1

        if cfg.early_stopping:
            if early_stopping_call.apply(valid_synth_f1):
                logger.warning("EARLY STOPPING")
                break

    # After training: reload the best checkpoint if requested, otherwise keep the last epoch
    if cfg.save_best:
        model_fname = os.path.join(saved_model_dir, "baseline_best")
        state = torch.load(model_fname)
        crnn = _load_crnn(state)
        logger.info(f"testing model: {model_fname}, epoch: {state['epoch']}")
    else:
        logger.info("testing model of last epoch: {}".format(cfg.n_epoch))

    # Save the per-epoch metrics as a TSV
    results_df = pd.DataFrame(results)
    results_df.to_csv(os.path.join(saved_pred_dir, "results.tsv"),
                      sep="\t", index=False, float_format="%.4f")

    # ##############
    # Validation
    # ##############
    crnn.eval()
    transforms_valid = get_transforms(cfg.max_frames, scaler, add_axis_conv)
    predictions_fname = os.path.join(saved_pred_dir, "baseline_validation.tsv")
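
    # Illustrative continuation (a sketch only; `validation_df`, the ground-truth
    # DataFrame for the validation set, is an assumed variable defined elsewhere
    # in the script): rebuild the prediction-time objects with the helper defined
    # above, then decode events and write them to predictions_fname.
    #
    #   params = _load_state_vars(state, validation_df)
    #   crnn = params["model"]
    #   median_window = params["median_window"]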