def showmatch(model_paths, task, randomize, hardness, num_envs, symmetric, random_rules):
    """Run an evaluation match for one or two policy checkpoints.

    With one path the policy plays without a configured opponent; with two
    paths the second checkpoint is wired in as 'player2'. Any other count
    is rejected. Evaluation runs until `eval_steps` (effectively forever).
    """
    # Prefer the first CUDA device; fall back to CPU with a notice.
    if torch.cuda.is_available():
        device = torch.device("cuda:0")
    else:
        print("Running on CPU")
        device = "cpu"

    n_paths = len(model_paths)
    if n_paths == 1:
        opponents = None
    elif n_paths == 2:
        opponents = {'player2': {'model_file': model_paths[1]}}
    else:
        raise Exception("Invalid args")

    objective = envs.Objective(task)
    # load_policy returns (policy, *metadata); only the policy is needed here.
    policy, _, _, _, _ = load_policy(model_paths[0], device)
    eval(
        policy=policy,
        num_envs=num_envs,
        device=device,
        objective=objective,
        eval_steps=int(1e20),
        opponents=opponents,
        printerval=500,
        randomize=randomize,
        hardness=hardness,
        symmetric=symmetric,
        random_rules=random_rules,
    )
def FUNCTION(*arg):
    """Body of a user-defined Lisp lambda.

    `arg` receives the actual arguments at call time; `dmyarg` (closure)
    holds the formal parameter list, e.g. "(x , y , z)". Evaluates every
    expression in `body` and returns the value of the last one.
    """
    if dmyarg == "nil":
        # No formal parameters: evaluate directly in the defining scope.
        env = scope
    else:
        # Bind each formal parameter name to the matching actual argument,
        # layered on top of the defining scope.
        bindings = {name: arg[i] for i, name in enumerate(pr.tokenize(dmyarg))}
        env = pr.joindict(bindings, scope)
    results = [pr.eval(expr, env) for expr in body]
    return results.pop()
def FUNCTION(*arg):
    # `arg` here is bound to the actual arguments at call time.
    # `dmyarg` is the formal parameter list, e.g. "(x , y , z)" (closure variable).
    if dmyarg == "nil":
        # No formal parameters: evaluate each body expression in the defining scope.
        lst = [pr.eval(each, scope) for each in body]
    else:
        # Bind formal parameter names to actual arguments, layered over the scope.
        s = pr.joindict(
            {e: arg[i] for i, e in enumerate(pr.tokenize(dmyarg))}, scope)
        lst = [pr.eval(each, s) for each in body]
    # Return the value of the last body expression.
    return lst.pop()
def defconst(symbol, value, scope):
    """Bind `symbol` to the evaluated `value` as a global constant.

    An already-defined constant is never overwritten; its current value
    is returned unchanged. Raises pr.Error when `symbol` is not a symbol.
    """
    if not pr.issymbol(symbol):
        raise pr.Error(const.ERROR["NOT_SYMBL"] % (symbol))
    # Constants are write-once: keep the existing binding if present.
    if symbol in const.GLOBAL_VALUE:
        return const.GLOBAL_VALUE[symbol]
    const.GLOBAL_VALUE[symbol] = pr.eval(value, scope)
    return const.GLOBAL_VALUE[symbol]
def defconst(symbol, value, scope):
    """Define a global constant: evaluate `value` and bind it to `symbol`.

    If `symbol` is already defined, the existing value is returned and
    NOT overwritten (constants are write-once). Raises pr.Error when
    `symbol` is not a valid symbol.
    """
    if not pr.issymbol(symbol):
        raise pr.Error(const.ERROR["NOT_SYMBL"] % (symbol))
    # Write-once semantics: keep the existing binding if present.
    if symbol in const.GLOBAL_VALUE:
        return const.GLOBAL_VALUE[symbol]
    val = pr.eval(value, scope)
    const.GLOBAL_VALUE[symbol] = val
    return val
# Test-time evaluation: load tokenizer and model, restore pre-trained
# weights (exit if none were provided), run the evaluation loop over the
# test split and write predictions to disk.
tokenizer = BertTokenizer.from_pretrained(args.load_pretrained_model_path)
model = Bert_TCN(args).cuda()
print(get_parameter_number(model))
if args.model_path:
    model = load_model(model)
    print("successfully load pre-trained model")
else:
    # Evaluation is meaningless without trained weights.
    print("no pre-trained model")
    exit(0)
print("===loading test data==")
test_dataset = WeiboDataset(args.data_path, file_type='test_simplified')
test_batches = DataLoader(dataset=test_dataset, batch_size=args.batch_size,
                          shuffle=False, collate_fn=collate_fn)
# Fix: use len() instead of calling __len__() directly (dunder methods
# should be invoked via the builtin, per the Python data model).
print("test:length={},batch_num={}".format(len(test_dataset), len(test_batches)))
# NOTE(review): the optimizer is built but unused in this visible chunk —
# presumably needed by code further down; kept for compatibility.
optimizer = getattr(optim, args.optim)
optimizer = optimizer(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
print('begin test evaluation')
test_loss, test_scores, test_predicts = eval(args, model, tokenizer, test_batches)
write_test_predictions(test_batches, test_predicts)
# Restore the latest checkpoint, evaluate on the validation and test
# splits, visualize the test set, then restore the best checkpoint
# (presumably for a second evaluation — the source is cut off below).
model = RAMNetwork(FLAGS=FLAGS, full_summary=False)
with tf.Session() as sess:
    model.saver.restore(sess, FLAGS.path + "/cp.ckpt")
    start_step = model.global_step.eval(session=sess)
    tf.logging.info('Evaluate model at step: %d ', start_step)
    train_writer, valid_writer, test_writer, train_handle, valid_handle, test_handle = model.setup(
        sess, train_data, valid_data, test_data)
    Visual = Visualization(model, FLAGS)
    # Test set
    eval(model, sess, FLAGS, valid_handle, FLAGS.batches_per_eval_valid, valid_writer,
         prefix='VALIDATION - LAST MODEL: ')
    eval(model, sess, FLAGS, test_handle, FLAGS.batches_per_eval_test, test_writer,
         prefix='TEST - LAST MODEL: ')
    Visual(sess, 'test_set', test_handle)
    model.saver.restore(sess, FLAGS.path + "/cp_best.ckpt")
    # NOTE(review): the source is truncated here — the call below is incomplete.
    eval(model, sess,
def eval(*arg):
    """Lisp-level `eval` builtin: evaluate form arg[0] in environment arg[1].

    Intentionally shadows the Python builtin — this name is what gets
    registered in the interpreter's dispatch table.
    """
    form, env = arg[0], arg[1]
    return pr.eval(form, env)
"cons" :(True , (lambda x,y: apply(makelist,[x] + pr.tokenize(y))) , False), "car" :(True , (lambda x:pr.tokenize(x)[0]) , False), "cdr" :(True , (lambda x:apply(makelist ,pr.tokenize(x)[1:])) , False), "list" :(True , makelist , False), "last" :(True , last , False), "length" :(True , (lambda x: len(pr.tokenize(x))) , False), "init" :(True , init , False), "map" :(True , map_ , False), "filter" :(True , filter_ , False), "list?" :(True , (lambda x:pr.booltolisp(pr.W_islist(x))) , False), "atom?" :(True , (lambda x:pr.booltolisp(pr.W_isatom(x))) , False), "symbol?" :(True , (lambda x:pr.booltolisp(pr.W_issymbol(x))) , False), "null?" :(True , (lambda x:pr.booltolisp(pr.W_isnil(x))) , False), "equal?" :(True , (lambda x,y: pr.booltolisp(x == y)) , False), "print" :(True , (lambda x: show(x)) , False), "define" :(False , define , False), "defconst":(False , defconst , False), "lambda" :(False , lmd , False), "if" :(False , (lambda c,x,y,s: pr.eval(x,s) if pr.booltopy(pr.eval(c,s)) else pr.eval(y,s)) , False), "eval" :(True , eval ,True), "exit" :(True , fin , False) }
def eval(*arg):
    # Lisp `eval` builtin: evaluate form arg[0] in environment arg[1].
    # Intentionally shadows the Python builtin; registered under "eval"
    # in the interpreter's dispatch table.
    return pr.eval(arg[0], arg[1])
"and" :(True , (lambda x,y: pr.booltolisp(pr.booltopy(x) and pr.booltopy(y))) , False), "or" :(True , (lambda x,y: pr.booltolisp(pr.booltopy(x) or pr.booltopy(y))) , False), "not" :(True , (lambda x: pr.booltolisp(not pr.booltopy(x)) ) , False), "cons" :(True , (lambda x,y: apply(makelist,[x] + pr.tokenize(y))) , False), "car" :(True , (lambda x:pr.tokenize(x)[0]) , False), "cdr" :(True , (lambda x:apply(makelist ,pr.tokenize(x)[1:])) , False), "list" :(True , makelist , False), "last" :(True , last , False), "length" :(True , (lambda x: len(pr.tokenize(x))) , False), "init" :(True , init , False), "map" :(True , map_ , False), "filter" :(True , filter_ , False), "list?" :(True , (lambda x:pr.booltolisp(pr.W_islist(x))) , False), "atom?" :(True , (lambda x:pr.booltolisp(pr.W_isatom(x))) , False), "symbol?" :(True , (lambda x:pr.booltolisp(pr.W_issymbol(x))) , False), "null?" :(True , (lambda x:pr.booltolisp(pr.W_isnil(x))) , False), "equal?" :(True , (lambda x,y: pr.booltolisp(x == y)) , False), "print" :(True , (lambda x: show(x)) , False), "define" :(False , define , False), "defconst":(False , defconst , False), "lambda" :(False , lmd , False), "if" :(False , (lambda c,x,y,s: pr.eval(x,s) if pr.booltopy(pr.eval(c,s)) else pr.eval(y,s)) , False), "eval" :(True , eval ,True), "exit" :(True , fin , False) }