args.fine_tuned_encoder, 'encoder.pkl') logger.info("Loading encoder from: {}".format(encoder_path)) else: encoder_path = None start_epoch = 0 best_joint_f1 = 0 learning_rate = args.learning_rate # Set Encoder and Model encoder, _ = load_encoder_model(args.encoder_name_or_path, args.model_type) model = HierarchicalGraphNetwork(config=args) if encoder_path is not None: encoder.load_state_dict(torch.load(encoder_path)) if model_path is not None: model.load_state_dict(torch.load(model_path)) ####################################################################################### if args.frozen_layer_number > 0: modules = [ encoder.embeddings, *encoder.encoder.layer[:args.frozen_layer_number] ] for module in modules: for param in module.parameters(): param.requires_grad = False logging.info('Frozen the first {} layers'.format(args.frozen_layer_number)) ####################################################################################### encoder.to(args.device) model.to(args.device)
def _remove_module_prefix(state_dict):
    """Return a copy of *state_dict* with any leading 'module.' removed.

    nn.DataParallel prefixes every saved parameter name with 'module.'.
    Strip it only when it is a true prefix — a substring replace could
    corrupt keys that happen to contain 'module.' in the middle.
    """
    cleaned = {}
    for key, value in state_dict.items():
        if key.startswith('module.'):
            cleaned[key[len('module.'):]] = value
        else:
            cleaned[key] = value
    return cleaned


# Restore encoder/model weights; both paths share the same prefix-stripping
# helper instead of duplicating the loop.
if encoder_path is not None:
    state_dict = torch.load(encoder_path)
    print('loading parameter from {}'.format(encoder_path))
    encoder.load_state_dict(_remove_module_prefix(state_dict))
if model_path is not None:
    state_dict = torch.load(model_path)
    print('loading parameter from {}'.format(model_path))
    model.load_state_dict(_remove_module_prefix(state_dict))

# Inference only: move to the target device and switch dropout/batch-norm
# layers to evaluation behavior.
encoder.to(args.device)
model.to(args.device)
encoder.eval()
model.eval()

#########################################################################
# Evaluation
##########################################################################
output_pred_file = join(args.exp_name, args.train_type + '_train_pred.json')
output_eval_file = join(args.exp_name, args.train_type + '_train_eval.txt')
output_score_file = join(args.exp_name, args.train_type + '_train_score.json')