def main():
    """Train and/or evaluate a model, depending on the parsed arguments."""
    params = interpret_args()

    # Load the dataset in the form the models expect.
    data = atis_data.ATISDataset(params)

    # Pick the model class: interaction-level or single-utterance.
    if params.interaction_level:
        model_cls = InteractionATISModel
    else:
        model_cls = ATISModel

    # Anonymization scoring needs the anonymizer; otherwise pass None.
    anonymizer = None
    if params.anonymize and params.anonymization_scoring:
        anonymizer = data.anonymizer

    model = model_cls(params,
                      data.input_vocabulary,
                      data.output_vocabulary,
                      anonymizer)

    checkpoint = ""
    if params.train:
        checkpoint = train(model, data, params)
    if params.evaluate:
        evaluate(model, data, params, checkpoint)
    if params.interactive:
        interact(model, params, data.anonymizer, checkpoint)
    if params.attention:
        evaluate_attention(model, data, params, params.save_file)
def main():
    """Train and/or evaluate a schema-interaction model."""
    params = interpret_args()

    # Load the dataset and record how many databases it contains.
    data = atis_data.ATISDataset(params)
    params.num_db = len(data.db2id)

    # Only the schema-interaction model is supported here.
    if not params.interaction_level:
        print('not implemented')
        exit()
    model_cls = SchemaInteractionATISModel

    anonymizer = None
    if params.anonymize and params.anonymization_scoring:
        anonymizer = data.anonymizer
    model = model_cls(params,
                      data.input_vocabulary,
                      data.output_vocabulary,
                      data.output_vocabulary_schema,
                      anonymizer)
    model = model.cuda()

    # Dump every parameter and make sure each one lives on the GPU.
    print('=====================Model Parameters=====================')
    for pname, tensor in model.named_parameters():
        print(pname, tensor.requires_grad, tensor.is_cuda, tensor.size())
        assert tensor.is_cuda

    model.build_optim()

    # Dump the optimizer's parameter groups for debugging.
    print('=====================Parameters in Optimizer==============')
    for group in model.trainer.param_groups:
        print(group.keys())
        for tensor in group['params']:
            print(tensor.size())

    if params.fine_tune_bert:
        print('=====================Parameters in BERT Optimizer==============')
        for group in model.bert_trainer.param_groups:
            print(group.keys())
            for tensor in group['params']:
                print(tensor.size())
    sys.stdout.flush()

    checkpoint = ""
    if params.train:
        checkpoint = train(model, data, params)
    # Evaluate each requested split, in the same order as the original.
    for split in ('valid', 'dev', 'test'):
        if params.evaluate and split in params.evaluate_split:
            evaluate(model, data, params, checkpoint, split=split)
def main():
    """Main function that trains and/or evaluates a model.

    Also reads token sequences from 'first.txt' (one whitespace-separated
    sequence per line) and forwards them to training.
    """
    # Fix: read the prediction file through a context manager so the
    # handle is always closed (the original leaked the open file object).
    with open('first.txt', 'r', encoding='utf-8') as f:
        all_pre = [line.strip('\n').split() for line in f]

    params = interpret_args()

    # Prepare the dataset into the proper form.
    data = atis_data.ATISDataset(params)

    # Construct the model object.
    if params.interaction_level:
        model_type = SchemaInteractionATISModel
    else:
        print('not implemented')
        exit()
    model = model_type(
        params,
        data.input_vocabulary,
        data.output_vocabulary,
        data.output_vocabulary_schema,
        data.anonymizer if params.anonymize and params.anonymization_scoring
        else None)
    model = model.cuda()

    # Dump every parameter and make sure each one lives on the GPU.
    print('=====================Model Parameters=====================')
    for name, param in model.named_parameters():
        print(name, param.requires_grad, param.is_cuda, param.size())
        assert param.is_cuda

    model.build_optim()

    print('=====================Parameters in Optimizer==============')
    for param_group in model.trainer.param_groups:
        print(param_group.keys())
        for param in param_group['params']:
            print(param.size())
    sys.stdout.flush()

    last_save_file = ""
    if params.train:
        last_save_file = train(all_pre, model, data, params)
    if params.evaluate and 'valid' in params.evaluate_split:
        evaluate(model, data, params, last_save_file, split='valid')
    if params.evaluate and 'dev' in params.evaluate_split:
        evaluate(model, data, params, last_save_file, split='dev')
    if params.evaluate and 'test' in params.evaluate_split:
        evaluate(model, data, params, last_save_file, split='test')
def main():
    """Main function that trains and/or evaluates a model."""
    params = interpret_args()

    # Prepare the dataset into the proper form.
    data = atis_data.ATISDataset(params)

    if params.new_version:
        # Load the pre-built output vocabulary. (It can be regenerated with
        # Vocabulary(params.interaction_train, params.interaction_valid) and
        # pickled to "new_vocab_train" when the data changes.)
        # Fix: open the pickle through a context manager so the file handle
        # is closed — the original pickle.load(open(...)) leaked it.
        with open("new_vocab_train", "rb") as vocab_file:
            my_vocab = pickle.load(vocab_file)
        print(my_vocab.id2label)
        print(len(my_vocab))
        data.output_vocabulary = my_vocab

        # Rewrite both splits into the new target representation.
        transfer_dataset(data.valid_data, name="valid",
                         maximum=params.train_maximum_sql_length)
        transfer_dataset(data.train_data, name="train",
                         maximum=params.train_maximum_sql_length)

    # Construct the model object.
    model_type = InteractionATISModel if params.interaction_level else ATISModel
    model = model_type(
        params,
        data.input_vocabulary,
        data.output_vocabulary,
        data.anonymizer if params.anonymize and params.anonymization_scoring
        else None)

    last_save_file = params.save_file
    if params.train:
        last_save_file = train(model, data, params, last_save_file)
    if params.evaluate:
        evaluate(model, data, params, last_save_file)
    if params.interactive:
        interact(model, params, data.anonymizer, last_save_file)
    if params.attention:
        evaluate_attention(model, data, params, params.save_file)
def main():
    """Set up the environment, then train and/or evaluate the model."""
    params = interpret_args()
    init_env(params)

    # Load the dataset.
    data = atis_data.ATISDataset(params)

    # Only the schema-interaction model is implemented.
    if not params.interaction_level:
        print('not implemented')
        exit()
    model_cls = SchemaInteractionATISModel

    anonymizer = None
    if params.anonymize and params.anonymization_scoring:
        anonymizer = data.anonymizer
    model = model_cls(params,
                      data.input_vocabulary,
                      data.output_vocabulary,
                      data.output_vocabulary_schema,
                      anonymizer)
    model = model.cuda()
    model.build_optim()
    sys.stdout.flush()

    checkpoint = ""
    if params.train:
        checkpoint = train(model, data, params)
    # Evaluate each requested split, in the same order as the original.
    for split in ('train', 'valid', 'dev', 'test'):
        if params.evaluate and split in params.evaluate_split:
            evaluate(model, data, params, checkpoint, split=split)
# NOTE(review): this line is an extraction artifact. It begins with the
# keyword-argument tail of an evaluate(...) call whose opening is not
# visible in this chunk, then continues into a script entry block
# (`if __name__ == '__main__':`) that removes a stale args.log, builds the
# dataset and constructs the model; the entry block appears truncated —
# confirm against the full file before editing.
# NOTE(review): `if params.use_query_attention == 1:
# params.use_query_attention = 1` is a no-op as written — presumably a
# different value or flag was intended; verify against the arg parser.
database_password=params.database_password, database_timeout=params.database_timeout, use_predicted_queries=params.use_predicted_queries, max_generation_length=params.eval_maximum_sql_length, write_results=True, use_gpu=True, compute_metrics=params.compute_metrics, step=step, first=first) if __name__ == '__main__': if os.path.exists('logs_sparc_pg_gsql/args.log'): os.remove('logs_sparc_pg_gsql/args.log') """Main function that trains and/or evaluates a model.""" params = interpret_args() data = atis_data.ATISDataset(params) # Construct the model object. if params.interaction_level: model_type = SchemaInteractionATISModel else: print('not implemented') exit() if params.use_query_attention == 1: params.use_query_attention = 1 model = model_type( params, data.input_vocabulary, data.output_vocabulary, data.output_vocabulary_schema, data.anonymizer if params.anonymize and params.anonymization_scoring else None)
"""Entry script: load CSV data, run the selected model, verify the answers."""
# Fix: imports hoisted to the top of the file and grouped (the original
# scattered `import numpy`, `import decimal`, and a wildcard
# `from time import *` between the function and the entry block); the
# wildcard is narrowed to the single name actually used.
import decimal
from time import time

import numpy as np

from data_processing import csv_processing
import verify as verify
import parse_args


def processing(args):
    """Run one model variant over the CSV data and verify its output.

    Three variants exist; Model (#1) is the one currently enabled — swap
    the commented constructor lines to try the others.
    """
    csv_data = csv_processing.get_data(args.data_path)

    model = Model(csv_data, args)  # 1
    # model = Angle_model(csv_data, args)  # 2
    # model = Prob_Model(csv_data, args)   # 3

    ans = model.run_raw()
    if args.top5:
        model.run_angle()
    else:
        verify.verify(args, csv_data, model.dist_data, ans)


if __name__ == '__main__':
    # Time the whole run and report the elapsed wall-clock seconds.
    begin_time = time()
    args = parse_args.interpret_args()
    processing(args)
    end_time = time()
    run_time = end_time - begin_time
    print('该程序运行时间:', run_time)
def main():
    """Main function that trains and/or evaluates a model.

    Pipeline: (1) MLE pre-training of the generator, (2) pre-training of
    the CNN discriminator, (3) adversarial (GAN-style) training — plain or
    mixed-MLE — and (4) evaluation on the validation split. Each phase can
    resume independently from its own checkpoint.
    """
    params = interpret_args()

    if params.gan:
        # Generation length must be consistent across training and eval.
        assert params.max_gen_len == params.train_maximum_sql_length \
            == params.eval_maximum_sql_length

    data = atis_data.ATISDataset(params)

    # ---------------- Generator ----------------
    generator = SchemaInteractionATISModel(params,
                                           data.input_vocabulary,
                                           data.output_vocabulary,
                                           data.output_vocabulary_schema,
                                           None)
    generator = generator.cuda()
    generator.build_optim()

    if params.gen_from_ckp:
        # Resume the generator (and optionally its BERT optimizer).
        gen_ckp_path = os.path.join(params.logdir, params.gen_pretrain_ckp)
        if params.fine_tune_bert:
            gen_epoch, generator, generator.trainer, \
                generator.bert_trainer = \
                load_ckp(gen_ckp_path, generator, generator.trainer,
                         generator.bert_trainer)
        else:
            gen_epoch, generator, generator.trainer, _ = \
                load_ckp(gen_ckp_path, generator, generator.trainer)
    else:
        gen_epoch = 0

    # Dump parameters and verify everything is on the GPU.
    print('====================Model Parameters====================')
    print('=======================Generator========================')
    for name, param in generator.named_parameters():
        print(name, param.requires_grad, param.is_cuda, param.size())
        assert param.is_cuda

    print('==================Optimizer Parameters==================')
    print('=======================Generator========================')
    for param_group in generator.trainer.param_groups:
        print(param_group.keys())
        for param in param_group['params']:
            print(param.size())

    if params.fine_tune_bert:
        print('=========================BERT===========================')
        for param_group in generator.bert_trainer.param_groups:
            print(param_group.keys())
            for param in param_group['params']:
                print(param.size())
    sys.stdout.flush()

    # Pre-train generator with MLE
    if params.train:
        print('=============== Pre-training generator! ================')
        train(generator, data, params, gen_epoch)
        print('=========== Pre-training generator complete! ===========')

    # ---------------- Discriminator ----------------
    # CNN filter widths 1, 5, 9, ... up to max_gen_len, with progressively
    # more filters per width.
    dis_filter_sizes = [i for i in range(1, params.max_gen_len, 4)]
    dis_num_filters = [(100 + i * 10)
                       for i in range(1, params.max_gen_len, 4)]
    discriminator = Discriminator(params, data.dis_src_vocab,
                                  data.dis_tgt_vocab, params.max_gen_len,
                                  params.num_dis_classes, dis_filter_sizes,
                                  dis_num_filters, params.max_pos_emb,
                                  params.num_tok_type, params.dis_dropout)
    discriminator = discriminator.cuda()
    dis_criterion = nn.NLLLoss(reduction='mean')
    dis_criterion = dis_criterion.cuda()
    dis_optimizer = optim.Adam(discriminator.parameters())

    if params.dis_from_ckp:
        dis_ckp_path = os.path.join(params.logdir, params.dis_pretrain_ckp)
        dis_epoch, discriminator, dis_optimizer, _ = load_ckp(
            dis_ckp_path, discriminator, dis_optimizer)
    else:
        dis_epoch = 0

    print('====================Model Parameters====================')
    print('=====================Discriminator======================')
    for name, param in discriminator.named_parameters():
        print(name, param.requires_grad, param.is_cuda, param.size())
        assert param.is_cuda

    print('==================Optimizer Parameters==================')
    print('=====================Discriminator======================')
    for param_group in dis_optimizer.param_groups:
        print(param_group.keys())
        for param in param_group['params']:
            print(param.size())
    sys.stdout.flush()

    # Pre-train discriminator
    if params.pretrain_discriminator:
        print('============= Pre-training discriminator! ==============')
        pretrain_discriminator(params, generator, discriminator,
                               dis_criterion, dis_optimizer, data,
                               start_epoch=dis_epoch)
        print('========= Pre-training discriminator complete! =========')

    # Adversarial Training
    if params.adversarial_training:
        print('================ Adversarial training! =================')
        # Fresh optimizers/criterion for the adversarial phase.
        generator.build_optim()
        dis_criterion = nn.NLLLoss(reduction='mean')
        dis_optimizer = optim.Adam(discriminator.parameters())
        dis_criterion = dis_criterion.cuda()

        # BUG FIX: the original used `params.mle is not "mixed_mle"`,
        # which compares object identity, not string equality (and is a
        # SyntaxWarning on CPython >= 3.8). Use `!=` as intended.
        if params.adv_from_ckp and params.mle != "mixed_mle":
            # Resume plain adversarial training from a checkpoint.
            adv_ckp_path = os.path.join(params.logdir, params.adv_ckp)
            if params.fine_tune_bert:
                epoch, batches, pos_in_batch, generator, discriminator, \
                    generator.trainer, dis_optimizer, \
                    generator.bert_trainer, _, _ = \
                    load_adv_ckp(adv_ckp_path, generator, discriminator,
                                 generator.trainer, dis_optimizer,
                                 generator.bert_trainer)
            else:
                epoch, batches, pos_in_batch, generator, discriminator, \
                    generator.trainer, dis_optimizer, _, _, _ = \
                    load_adv_ckp(adv_ckp_path, generator, discriminator,
                                 generator.trainer, dis_optimizer)
            adv_train(generator, discriminator, dis_criterion,
                      dis_optimizer, data, params, start_epoch=epoch,
                      start_batches=batches,
                      start_pos_in_batch=pos_in_batch)
        elif params.adv_from_ckp and params.mle == "mixed_mle":
            # Resume mixed-MLE adversarial training from a checkpoint.
            adv_ckp_path = os.path.join(params.logdir, params.adv_ckp)
            if params.fine_tune_bert:
                epoch, batches, pos_in_batch, generator, discriminator, \
                    generator.trainer, dis_optimizer, \
                    generator.bert_trainer, clamp, length = \
                    load_adv_ckp(adv_ckp_path, generator, discriminator,
                                 generator.trainer, dis_optimizer,
                                 generator.bert_trainer, mle=True)
            else:
                epoch, batches, pos_in_batch, generator, discriminator, \
                    generator.trainer, dis_optimizer, _, clamp, length = \
                    load_adv_ckp(adv_ckp_path, generator, discriminator,
                                 generator.trainer, dis_optimizer,
                                 mle=True)
            mixed_mle(generator, discriminator, dis_criterion,
                      dis_optimizer, data, params, start_epoch=epoch,
                      start_batches=batches,
                      start_pos_in_batch=pos_in_batch, start_clamp=clamp,
                      start_len=length)
        else:
            # Fresh adversarial run (no checkpoint).
            if params.mle == 'mixed_mle':
                mixed_mle(generator, discriminator, dis_criterion,
                          dis_optimizer, data, params)
            else:
                adv_train(generator, discriminator, dis_criterion,
                          dis_optimizer, data, params)

    if params.evaluate and 'valid' in params.evaluate_split:
        print("================== Evaluating! ===================")
        evaluate(generator, data, params, split='valid')
        print("============= Evaluation finished! ===============")
# NOTE(review): this `main` only parses args and builds the dataset as far
# as this chunk shows; the definition may continue past the end of the
# visible source — confirm against the full file before assuming it is a
# stub.
def main(): params = interpret_args() data = atis_data.ATISDataset(params)