Code example #1
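All five examples below share the same module-level context. A minimal sketch of the assumed imports (data_utils, model_utils, and create_model are project-local and not shown here):

import random

import pandas as pd

import data_utils    # project-local: dataset loading, batching, vocabularies
import model_utils   # project-local: provides Logger
# create_model(...) is also assumed to be defined in the surrounding project.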
def train(args):
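    """Training loop with optional resumption from a saved global step."""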
    print('Training:')

    train_data = data_utils.load_dataset(args.train_dataset, args)
    train_data_size = len(train_data)
    if args.train_proportion < 1.0:
        # Subsample the training set down to the requested proportion.
        random.shuffle(train_data)
        train_data_size = int(train_data_size * args.train_proportion)
        train_data = train_data[:train_data_size]

    eval_data = data_utils.load_dataset(args.val_dataset, args)

    DataProcessor = data_utils.jspDataProcessor(args)
    model_supervisor = create_model(args)

    # When resuming from step args.resume, skip the samples already consumed.
    resume_step = args.resume > 0
    resume_idx = args.resume * args.batch_size

    logger = model_utils.Logger(args)
    if args.resume:
        # Replay the summaries written so far so the log stays contiguous.
        logs = pd.read_csv("../logs/" + args.log_name)
        for index, log in logs.iterrows():
            val_summary = {
                'avg_reward': log['avg_reward'],
                'global_step': log['global_step']
            }
            logger.write_summary(val_summary)

    # resume_idx // train_data_size is the epoch to resume from (0 if fresh).
    for epoch in range(resume_idx // train_data_size, args.num_epochs):
        random.shuffle(train_data)
        # On the first epoch after resuming, start from the saved batch offset.
        for batch_idx in range(resume_idx % train_data_size if resume_step else 0,
                               train_data_size, args.batch_size):
            resume_step = False
            print(epoch, batch_idx)
            batch_data = DataProcessor.get_batch(train_data, args.batch_size,
                                                 batch_idx)
            train_loss, train_reward = model_supervisor.train(batch_data)
            print('train loss: %.4f train reward: %.4f' %
                  (train_loss, train_reward))

            # global_step counts steps since this run started, so offset it
            # by args.resume to get the overall step.
            if (args.resume +
                    model_supervisor.global_step) % args.eval_every_n == 0:
                eval_loss, eval_reward = model_supervisor.eval(
                    eval_data, args.output_trace_flag, args.max_eval_size)
                val_summary = {
                    'avg_reward': eval_reward,
                    'global_step': args.resume + model_supervisor.global_step
                }
                logger.write_summary(val_summary)
                model_supervisor.save_model()

            # Periodically decay the learning rate; also halve cont_prob
            # while it stays above 0.01.
            if args.lr_decay_steps is not None and (
                    args.resume +
                    model_supervisor.global_step) % args.lr_decay_steps == 0:
                model_supervisor.model.lr_decay(args.lr_decay_rate)
                if model_supervisor.model.cont_prob > 0.01:
                    model_supervisor.model.cont_prob *= 0.5
Code example #2
def evaluate(args):
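    """Evaluate a trained model on the Halide test set."""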
    print('Evaluation:')

    test_data = data_utils.load_dataset(args.test_dataset, args)
    test_data_size = len(test_data)

    # Disable dropout at evaluation time.
    args.dropout_rate = 0.0

    DataProcessor = data_utils.HalideDataProcessor()

    if args.test_min_len is not None:
        test_data = DataProcessor.prune_dataset(test_data,
                                                min_len=args.test_min_len)

    # Build the term/operator vocabularies the model expects.
    term_vocab, term_vocab_list = DataProcessor.load_term_vocab()
    op_vocab, op_vocab_list = DataProcessor.load_ops()
    args.term_vocab_size = len(term_vocab)
    args.op_vocab_size = len(op_vocab)
    model_supervisor = create_model(args, term_vocab, term_vocab_list,
                                    op_vocab, op_vocab_list)
    test_loss, test_reward = model_supervisor.eval(test_data,
                                                   args.output_trace_flag,
                                                   args.output_trace_option,
                                                   args.output_trace_file)

    print('test loss: %.4f test reward: %.4f' % (test_loss, test_reward))
Code example #3
def train(args):
    """Training loop for the Halide setting (no mid-run resumption)."""
    print('Training:')

    train_data = data_utils.load_dataset(args.train_dataset, args)
    eval_data = data_utils.load_dataset(args.val_dataset, args)

    DataProcessor = data_utils.HalideDataProcessor()

    if args.train_proportion < 1.0:
        # Subsample the training set to the requested proportion.
        # (The original snippet read train_data_size before assigning it,
        # which would raise a NameError; use len(train_data) directly.)
        random.shuffle(train_data)
        train_data_size = int(len(train_data) * args.train_proportion)
        train_data = train_data[:train_data_size]

    if args.train_max_len is not None:
        train_data = DataProcessor.prune_dataset(train_data,
                                                 max_len=args.train_max_len)

    train_data_size = len(train_data)
    # Build the term/operator vocabularies required by the model.
    term_vocab, term_vocab_list = DataProcessor.load_term_vocab()
    op_vocab, op_vocab_list = DataProcessor.load_ops()
    args.term_vocab_size = len(term_vocab)
    args.op_vocab_size = len(op_vocab)
    model_supervisor = create_model(args, term_vocab, term_vocab_list,
                                    op_vocab, op_vocab_list)

    logger = model_utils.Logger(args)

    for epoch in range(args.num_epochs):
        random.shuffle(train_data)
        for batch_idx in range(0, train_data_size, args.batch_size):
            print(epoch, batch_idx)
            batch_data = DataProcessor.get_batch(train_data, args.batch_size,
                                                 batch_idx)
            train_loss, train_reward = model_supervisor.train(batch_data)
            print('train loss: %.4f train reward: %.4f' %
                  (train_loss, train_reward))

            if model_supervisor.global_step % args.eval_every_n == 0:
                eval_loss, eval_reward = model_supervisor.eval(
                    eval_data, args.output_trace_flag,
                    args.output_trace_option, args.output_trace_file,
                    args.max_eval_size)
                val_summary = {
                    'avg_reward': eval_reward,
                    'global_step': model_supervisor.global_step
                }
                model_supervisor.save_model()
                logger.write_summary(val_summary)

            if args.lr_decay_steps is not None and (
                    model_supervisor.global_step % args.lr_decay_steps == 0):
                # Decay the learning rate and anneal cont_prob as above.
                model_supervisor.model.lr_decay(args.lr_decay_rate)
                if model_supervisor.model.cont_prob > 0.01:
                    model_supervisor.model.cont_prob *= 0.5
Code example #4
def evaluate(args):
    """Evaluate a trained model on the jsp test set."""
    print('Evaluation:')

    test_data = data_utils.load_dataset(args.test_dataset, args)
    test_data_size = len(test_data)
    # Disable dropout at evaluation time.
    args.dropout_rate = 0.0

    DataProcessor = data_utils.jspDataProcessor(args)
    model_supervisor = create_model(args)
    test_loss, test_reward = model_supervisor.eval(test_data,
                                                   args.output_trace_flag)

    print('test loss: %.4f test reward: %.4f' % (test_loss, test_reward))
Code example #5
def evaluate(args):
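    """Search-based evaluation: rewrite each test expression and compare
    the achieved reward against the ground-truth trace."""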
    print('Search:')

    test_data = data_utils.load_dataset(args.test_dataset, args)
    # Assumed here: DataProcessor is a module-level
    # data_utils.HalideDataProcessor(), as in the examples above; the
    # original snippet references it without a local definition.
    DataProcessor = data_utils.HalideDataProcessor()
    if args.test_min_len is not None:
        test_data = DataProcessor.prune_dataset(test_data,
                                                min_len=args.test_min_len)
        DataProcessor.calc_data_stat(test_data)
    data_size = len(test_data)

    cum_expr_reward = 0
    cum_gt_reward = 0
    cum_tree_reward = 0

    for batch_idx in range(0, data_size, args.batch_size):
        batch_data = DataProcessor.get_batch(test_data, args.batch_size,
                                             batch_idx)
        for i, sample in enumerate(batch_data):
            gt_trace, tm = sample
            # Reset the global record consulted by the recursive rewrite()
            # helper, which is assumed to be defined elsewhere in this module.
            global expr_rec
            expr_rec = {}
            init_expr = tm.to_string(tm.root)
            len_tm = len(init_expr)
            num_nodes_tm = tm.num_trees
            res_tm, res_len, res_num_nodes = rewrite(tm, init_expr, len_tm,
                                                     num_nodes_tm, 0)
            # Rewards are the reduction in expression length / tree size; the
            # ground-truth reward is the length drop along gt_trace.
            cur_expr_reward = len(init_expr) - res_len
            cur_tree_reward = num_nodes_tm - res_num_nodes
            cur_gt_reward = len(gt_trace[0]) - len(gt_trace[-1])
            cum_expr_reward += cur_expr_reward
            cum_tree_reward += cur_tree_reward
            cum_gt_reward += cur_gt_reward
            print('sample %d cur expr reward: %.4f cur tree reward: %.4f '
                  'gt reward: %.4f avg expr reward: %.4f avg tree reward: %.4f '
                  'avg gt reward: %.4f'
                  % (batch_idx + i, cur_expr_reward, cur_tree_reward,
                     cur_gt_reward,
                     cum_expr_reward * 1.0 / (batch_idx + i + 1),
                     cum_tree_reward * 1.0 / (batch_idx + i + 1),
                     cum_gt_reward * 1.0 / (batch_idx + i + 1)))
    cum_expr_reward = cum_expr_reward * 1.0 / data_size
    cum_tree_reward = cum_tree_reward * 1.0 / data_size
    cum_gt_reward = cum_gt_reward * 1.0 / data_size
    print(
        'avg search expr reward: %.4f tree reward: %.4f avg gt reward: %.4f' %
        (cum_expr_reward, cum_tree_reward, cum_gt_reward))
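
For context, a minimal entry point that dispatches to train() and evaluate() might look like the sketch below. The flag names are hypothetical, inferred from the args attributes used above; the real project defines many more.

import argparse

def main():
    parser = argparse.ArgumentParser()
    # Hypothetical flags, inferred from the attributes referenced above.
    parser.add_argument('--eval', action='store_true')
    parser.add_argument('--train_dataset', type=str)
    parser.add_argument('--val_dataset', type=str)
    parser.add_argument('--test_dataset', type=str)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--num_epochs', type=int, default=10)
    parser.add_argument('--train_proportion', type=float, default=1.0)
    parser.add_argument('--resume', type=int, default=0)
    parser.add_argument('--eval_every_n', type=int, default=100)
    parser.add_argument('--lr_decay_steps', type=int, default=None)
    parser.add_argument('--lr_decay_rate', type=float, default=0.9)
    args = parser.parse_args()

    if args.eval:
        evaluate(args)
    else:
        train(args)

if __name__ == '__main__':
    main()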