import random

import pandas as pd

# `data_utils`, `model_utils`, and `create_model` are project-local helpers
# assumed to be importable in each of the examples below; pandas is only
# needed for the log-replay logic in this first example.


def train(args):
    print('Training:')

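    # Load the full training set; optionally keep only a random fraction of it.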
    train_data = data_utils.load_dataset(args.train_dataset, args)
    train_data_size = len(train_data)
    if args.train_proportion < 1.0:
        random.shuffle(train_data)
        train_data_size = int(train_data_size * args.train_proportion)
        train_data = train_data[:train_data_size]

    eval_data = data_utils.load_dataset(args.val_dataset, args)

    DataProcessor = data_utils.jspDataProcessor(args)
    model_supervisor = create_model(args)

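    # `args.resume` counts the global steps already taken in a previous run;
    # multiplying by the batch size gives the number of samples consumed.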
    resume_step = bool(args.resume)
    resume_idx = args.resume * args.batch_size

    logger = model_utils.Logger(args)
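    # When resuming, replay the summaries from the previous run's CSV log so
    # the new log stays continuous with the old one.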
    if args.resume:
        logs = pd.read_csv("../logs/" + args.log_name)
        for index, log in logs.iterrows():
            val_summary = {
                'avg_reward': log['avg_reward'],
                'global_step': log['global_step']
            }
            logger.write_summary(val_summary)

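    # Resume from the epoch containing the first unprocessed sample.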
    for epoch in range(resume_idx // train_data_size, args.num_epochs):
        random.shuffle(train_data)
        # On the first pass after a resume, start mid-epoch; otherwise from 0.
        start_idx = resume_idx % train_data_size if resume_step else 0
        for batch_idx in range(start_idx, train_data_size, args.batch_size):
            resume_step = False
            print(epoch, batch_idx)
            batch_data = DataProcessor.get_batch(train_data, args.batch_size,
                                                 batch_idx)
            train_loss, train_reward = model_supervisor.train(batch_data)
            print('train loss: %.4f train reward: %.4f' %
                  (train_loss, train_reward))

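            # Evaluate on the validation set and checkpoint every
            # `eval_every_n` global steps.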
            if (args.resume +
                    model_supervisor.global_step) % args.eval_every_n == 0:
                eval_loss, eval_reward = model_supervisor.eval(
                    eval_data, args.output_trace_flag, args.max_eval_size)
                val_summary = {
                    'avg_reward': eval_reward,
                    'global_step': args.resume + model_supervisor.global_step
                }
                logger.write_summary(val_summary)
                model_supervisor.save_model()

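            # Decay the learning rate on a fixed step schedule, and halve the
            # model's `cont_prob` until it drops to 0.01.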
            if args.lr_decay_steps is not None and (
                    args.resume +
                    model_supervisor.global_step) % args.lr_decay_steps == 0:
                model_supervisor.model.lr_decay(args.lr_decay_rate)
                if model_supervisor.model.cont_prob > 0.01:
                    model_supervisor.model.cont_prob *= 0.5
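
A minimal driver for the example above. The attribute names are taken from the
`args.*` accesses in the code; every value is an illustrative placeholder, not
a default from the original repository, and the data/model helpers may read
further attributes not listed here:

from types import SimpleNamespace

args = SimpleNamespace(
    train_dataset='train.json',   # placeholder paths
    val_dataset='val.json',
    train_proportion=1.0,
    batch_size=32,
    num_epochs=10,
    resume=0,                     # global step to resume from; 0 = fresh run
    log_name='run.csv',
    eval_every_n=500,
    max_eval_size=1000,
    output_trace_flag=False,
    lr_decay_steps=None,          # None disables learning-rate decay
    lr_decay_rate=0.9,
)
train(args)
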
Example #2
def train(args):
    print('Training:')

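    # Load both splits; `preprocess` returns the processed data together with
    # per-sample indices.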
    data_processor = data_utils.DataProcessor(args)
    train_data = data_processor.load_data(args.train_dataset)
    train_data, train_indices = data_processor.preprocess(train_data)
    dev_data = data_processor.load_data(args.dev_dataset)
    dev_data, dev_indices = data_processor.preprocess(dev_data)

    train_data_size = len(train_data)
    args.word_vocab_size = data_processor.word_vocab_size
    args.code_vocab_size = data_processor.code_vocab_size
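    # Both vocabularies are passed through to model construction.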
    model_supervisor = create_model(args, data_processor.word_vocab,
                                    data_processor.code_vocab)

    logger = model_utils.Logger(args)

    for epoch in range(args.num_epochs):
        random.shuffle(train_data)
        for batch_idx in range(0, train_data_size, args.batch_size):
            print(epoch, batch_idx)
            batch_input, batch_labels = data_processor.get_batch(
                train_data, args.batch_size, batch_idx)
            train_loss, train_acc = model_supervisor.train(
                batch_input, batch_labels)
            print('train loss: %.4f train acc: %.4f' % (train_loss, train_acc))

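            # Checkpoint, then evaluate on the dev set every `eval_every_n`
            # global steps.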
            if model_supervisor.global_step % args.eval_every_n == 0:
                model_supervisor.save_model()
                (eval_loss, eval_label_acc, eval_data_acc, eval_acc,
                 pred_labels) = model_supervisor.eval(
                     dev_data, args.data_order_invariant, args.max_eval_size)
                val_summary = {
                    'train_loss': train_loss,
                    'train_acc': train_acc,
                    'eval_loss': eval_loss,
                    'eval_label_acc': eval_label_acc,
                    'eval_data_acc': eval_data_acc,
                    'eval_acc': eval_acc,
                    'global_step': model_supervisor.global_step
                }
                logger.write_summary(val_summary)

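            # Decay the learning rate on a fixed step schedule.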
            if args.lr_decay_steps is not None and (
                    model_supervisor.global_step % args.lr_decay_steps == 0):
                model_supervisor.model.lr_decay(args.lr_decay_rate)
Example #3
def train(args):
	print('Training:')

	train_data = data_utils.load_dataset(args.train_dataset, args)
	eval_data = data_utils.load_dataset(args.val_dataset, args)

	DataProcessor = data_utils.HalideDataProcessor()

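	# Optionally train on a random fraction of the training set.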
	if args.train_proportion < 1.0:
		random.shuffle(train_data)
		train_data_size = int(len(train_data) * args.train_proportion)
		train_data = train_data[:train_data_size]

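	# Optionally drop samples longer than `train_max_len`.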
	if args.train_max_len is not None:
		train_data = DataProcessor.prune_dataset(train_data, max_len=args.train_max_len)

	train_data_size = len(train_data)
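	# Load term and operator vocabularies; their sizes parameterize the model.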
	term_vocab, term_vocab_list = DataProcessor.load_term_vocab()
	op_vocab, op_vocab_list = DataProcessor.load_ops()
	args.term_vocab_size = len(term_vocab)
	args.op_vocab_size = len(op_vocab)
	model_supervisor = create_model(
		args, term_vocab, term_vocab_list, op_vocab, op_vocab_list)

	logger = model_utils.Logger(args)

	for epoch in range(args.num_epochs):
		random.shuffle(train_data)
		for batch_idx in range(0, train_data_size, args.batch_size):
			print(epoch, batch_idx)
			batch_data = DataProcessor.get_batch(train_data, args.batch_size, batch_idx)
			train_loss, train_reward = model_supervisor.train(batch_data)
			print('train loss: %.4f train reward: %.4f' % (train_loss, train_reward))

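			# Evaluate, checkpoint, and log the average reward every
			# `eval_every_n` global steps.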
			if model_supervisor.global_step % args.eval_every_n == 0:
				eval_loss, eval_reward = model_supervisor.eval(
					eval_data, args.output_trace_flag, args.output_trace_option,
					args.output_trace_file, args.max_eval_size)
				val_summary = {
					'avg_reward': eval_reward,
					'global_step': model_supervisor.global_step
				}
				model_supervisor.save_model()
				logger.write_summary(val_summary)

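			# Decay the learning rate on a fixed step schedule, and halve
			# `cont_prob` until it drops to 0.01.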
			if args.lr_decay_steps is not None and (
					model_supervisor.global_step % args.lr_decay_steps == 0):
				model_supervisor.model.lr_decay(args.lr_decay_rate)
				if model_supervisor.model.cont_prob > 0.01:
					model_supervisor.model.cont_prob *= 0.5