def transform_q2s(question):
    #### Input: question $q$,
    # # lower case everything (why?)
    # question = question.lower()
    #### dependency parsed.
    q = Sentence(get_parse(question), question, "en", print_tokens="new")
    doc = nlp(question)
    lemmas = {token.text.lower(): token.lemma_ for token in doc}
    past_tense = {token.text: token.tag_ == "VBD" for token in doc}

    # if question doesn't start with "why", stop there
    if q.word(1).lower() != "why":
        return "ERROR: can't transform: does not start with 'why'"

    # replace "'s" with "is" and "'d" with "did" where appropriate
    question, q = replacements(q, {"old": "'s", "new": "is", "pos": "V"})
    question, q = replacements(q, {"old": "'re", "new": "are", "pos": "V"})
    question, q = replacements(q, {"old": "'d", "new": "did", "pos": "M"})
    question, q = replacements(q, {"old": "'ve", "new": "have", "pos": "V"})

    # remove the final question mark
    if q.word(len(q.tokens)) == "?":
        q.cut(len(q.tokens))

    # if question starts with "why is it that", return the rest
    if (q.words(1, 5).lower() == "why is it that ") or (q.words(1, 5).lower() == "why was it that "):
        for i in range(4):
            q.cut(1)
        return cleanup_statement(q)

    #### Start at the root of $q$
    all_indices = [t["index"] for t in q.tokens]
    root_index = [i for i in all_indices if "ROOT" in q.find_dep_types(i)][0]

    #### $subj$ = \textsc{nsubj} or \textsc{nsubjpass} dependent of the root
    subj_end_index, subj_is_plural = get_subj(q, root_index, lemmas)

    #### $vp^{(\text{lemma})}$ = all remaining dependents
    # (everything after subj_end_index is the lemmatized vp)

    #### if $aux$ in [``do'', ``does'', ``did'']
    if q.word(2).lower() in ["do", "does", "did"]:
        #### $vp$ = apply tense/person of $aux$ to $vp^{(\text{lemma})}$
        q = conjugate_main_verb(q, q.word(2), root_index)
    else:
        #### else $vp$ = $aux$ $vp^{(\text{lemma})}$
        # if it's not "do", just move the single helping word to after the subj NP
        q.move(2, subj_end_index)

    #### $s$ = $subj$ $vp$
    #### Remove "Why".
    q.cut(1)
    return cleanup_statement(q)
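# A usage sketch, not part of the original module: it assumes the parsing
# dependencies used above (the spaCy `nlp` pipeline and the `get_parse` /
# `Sentence` / `replacements` helpers) are already loaded, and the exact
# surface form of each output depends on that parser.
if __name__ == "__main__":
    # a "why" question with do-support: "did" is folded into the main verb
    print(transform_q2s("Why did the chicken cross the road?"))
    # -> roughly "the chicken crossed the road"

    # a "why" question with an auxiliary: "is" moves to after the subject NP
    print(transform_q2s("Why is the sky blue?"))
    # -> roughly "the sky is blue"

    # a non-"why" question is rejected early
    print(transform_q2s("What time is it?"))
    # -> "ERROR: can't transform: does not start with 'why'"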
    # persist the trained model; some model types do not support saving
    try:
        ModelSaver(saved_mdl, mdl_name=mdl_name, dir_save=model_path + mdl_name,
                   name_save='/saved_model_' + str(task))()
    except Exception:
        print("This model does not support saving!")

    # log the predictions alongside the validation targets
    log = dict()
    log['y_pred'] = y_pred_
    log['y_valid'] = y_valid_
    save2pickle(log, out_path=forecast_path + mdl_name,
                out_file='/saved_model_' + str(task) + '.pickle')


if __name__ == "__main__":
    from parser import get_parse

    config = get_parse()
    main(
        mdl_name=config.mdl_name,
        input_path=config.input_path,
        model_path=config.model_path,
        forecast_path=config.forecast_path,
        batch_size=config.batch_size,
        epochs=config.epochs,
        task=config.num_jobs,
    )
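# A minimal sketch of the `save2pickle` helper called above. Its real
# implementation is not shown in this excerpt, so this is an assumption: it
# creates the output directory if needed and pickles the object to
# out_path + out_file.
import os
import pickle

def save2pickle(obj, out_path, out_file):
    os.makedirs(out_path, exist_ok=True)  # ensure the target directory exists
    with open(out_path + out_file, 'wb') as f:
        pickle.dump(obj, f)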
import copy
import json
import logging
import os
import random

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torchprofile import profile_macs

from codebase.networks import NSGANetV2
from eval_utils import get_dataset
from evaluator import OFAEvaluator
from parser import get_parse

args = get_parse()

# restrict CUDA to the requested GPUs and record how many are in use
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
device_list = [int(_) for _ in args.gpu.split(',')]
args.n_gpus = len(device_list)
args.device = torch.device("cuda:0")

# seed every RNG for reproducibility; draw a random seed if none was given
if args.seed is None or args.seed < 0:
    args.seed = random.randint(1, 100000)
torch.cuda.manual_seed(args.seed)
torch.manual_seed(args.seed)
np.random.seed(args.seed)
random.seed(args.seed)

evaluator = OFAEvaluator(args, model_path='../.torch/ofa_nets/ofa_mbv3_d234_e346_k357_w1.0')
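# A sketch of how `profile_macs` (imported above) is typically applied to
# measure a network's multiply-accumulate count; the helper name and the dummy
# input shape are assumptions for illustration, not code from this repository.
def count_macs(model: nn.Module, resolution: int = 224) -> int:
    # torchprofile traces the model on a concrete input and sums the MACs
    model = copy.deepcopy(model).eval()
    dummy = torch.randn(1, 3, resolution, resolution)
    return int(profile_macs(model, dummy))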