    def train(self, model, data_loader, batch_size, n_epoch, template_flag,
              resume=False, optimizer=None, mode=0, teacher_forcing_ratio=0,
              post_flag=False):
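        # Train `model` for n_epoch epochs; with resume=True, training restarts
        # from the best checkpoint saved under ./experiment.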
        self.evaluator = Evaluator(
            vocab_dict=self.vocab_dict,
            vocab_list=self.vocab_list,
            decode_classes_dict=self.decode_classes_dict,
            decode_classes_list=self.decode_classes_list,
            loss=NLLLoss(),
            cuda_use=self.cuda_use)
        if resume:
            checkpoint_path = Checkpoint.get_certain_checkpoint(
                "./experiment", "best")
            resume_checkpoint = Checkpoint.load(checkpoint_path)
            model = resume_checkpoint.model
            self.optimizer = resume_checkpoint.optimizer

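            # Rebind the restored optimizer class to the restored model's
            # parameters, reusing its saved hyperparameters but dropping the
            # stale 'params' entry.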
            resume_optim = self.optimizer.optimizer
            defaults = resume_optim.param_groups[0]
            defaults.pop('params', None)
            self.optimizer.optimizer = resume_optim.__class__(
                model.parameters(), **defaults)

            start_epoch = resume_checkpoint.epoch
            start_step = resume_checkpoint.step
            self.train_acc_list = resume_checkpoint.train_acc_list
            self.test_acc_list = resume_checkpoint.test_acc_list
            self.loss_list = resume_checkpoint.loss_list
        else:
            start_epoch = 1
            start_step = 0
            self.train_acc_list = []
            self.test_acc_list = []
            self.loss_list = []
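            # Transformer-style Noam schedule: model_size=512, factor=1,
            # warmup=2000 steps; Adam's lr is driven entirely by the schedule.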
            model_opt = NoamOpt(
                512, 1, 2000,
                torch.optim.Adam(model.parameters(),
                                 lr=0,
                                 betas=(0.9, 0.98),
                                 eps=1e-9))
            if optimizer is None:
                optimizer = Optimizer(optim.Adam(model.parameters()),
                                      max_grad_norm=0)
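            # Note: the Noam-wrapped optimizer below is what actually drives
            # training; the fallback `optimizer` built above is left unused.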
            self.optimizer = model_opt

        self._train_epoches(data_loader=data_loader,
                            model=model,
                            batch_size=batch_size,
                            start_epoch=start_epoch,
                            start_step=start_step,
                            n_epoch=n_epoch,
                            mode=mode,
                            template_flag=template_flag,
                            teacher_forcing_ratio=teacher_forcing_ratio,
                            post_flag=post_flag)
Example #2
def step_one_test():

    data_loader = DataLoader(args)

    #Checkpoint.CHECKPOINT_DIR_NAME = "0120_0030"
    Checkpoint.CHECKPOINT_DIR_NAME = args.checkpoint_dir_name
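    # Load the trained model from ./experiment/<checkpoint_dir_name>/best.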
    checkpoint_path = os.path.join("./experiment",
                                   Checkpoint.CHECKPOINT_DIR_NAME, "best")
    checkpoint = Checkpoint.load(checkpoint_path)

    seq2seq = checkpoint.model
    if args.cuda_use:
        seq2seq = seq2seq.cuda()

    seq2seq.eval()
    evaluator = Evaluator(vocab_dict=data_loader.vocab_dict,
                          vocab_list=data_loader.vocab_list,
                          decode_classes_dict=data_loader.decode_classes_dict,
                          decode_classes_list=data_loader.decode_classes_list,
                          loss=NLLLoss(),
                          cuda_use=args.cuda_use)
    name = args.run_flag
    if name == 'test_23k':
        test_temp_acc, test_ans_acc = evaluator.evaluate(
            model=seq2seq,
            data_loader=data_loader,
            data_list=data_loader.math23k_test_list,
            template_flag=True,
            batch_size=64,
            evaluate_type=0,
            use_rule=False,
            mode=args.mode,
            post_flag=args.post_flag,
            name_save=name)
        print(test_temp_acc, test_ans_acc)
Example #3
def step_three():

    data_loader = DataLoader(args)

    Checkpoint.CHECKPOINT_DIR_NAME = args.checkpoint_dir_name
    checkpoint_path = os.path.join("./experiment",
                                   Checkpoint.CHECKPOINT_DIR_NAME, "best")
    checkpoint = Checkpoint.load(checkpoint_path)

    seq2seq = checkpoint.model
    if args.cuda_use:
        seq2seq = seq2seq.cuda()

    seq2seq.eval()
    evaluator = Evaluator(vocab_dict=data_loader.vocab_dict,
                          vocab_list=data_loader.vocab_list,
                          decode_classes_dict=data_loader.decode_classes_dict,
                          decode_classes_list=data_loader.decode_classes_list,
                          loss=NLLLoss(),
                          cuda_use=args.cuda_use)
    test_temp_acc, test_ans_acc = evaluator.evaluate(
        model=seq2seq,
        data_loader=data_loader,
        data_list=data_loader.math57k_data_list,
        template_flag=False,
        batch_size=64,
        evaluate_type=0,
        use_rule=True,
        mode=args.mode)
    print(test_temp_acc, test_ans_acc)
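Example #4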
def gen_math57k_error():
    data_loader = DataLoader()
    Checkpoint.CHECKPOINT_DIR_NAME = args.checkpoint_dir_name
    checkpoint_path = os.path.join("./experiment", Checkpoint.CHECKPOINT_DIR_NAME, args.load_name)
    checkpoint = Checkpoint.load(checkpoint_path)

    seq2seq = checkpoint.model
    if args.cuda_use:
        seq2seq = seq2seq.cuda()

    seq2seq.eval()
    evaluator = Evaluator(vocab_dict=data_loader.vocab_dict,
                          vocab_list=data_loader.vocab_list,
                          decode_classes_dict=data_loader.decode_classes_dict,
                          decode_classes_list=data_loader.decode_classes_list,
                          loss=NLLLoss(),
                          cuda_use=args.cuda_use)

    evaluator.gen_rl_data(model=seq2seq,
                          data_loader=data_loader,
                          data_list=data_loader.math57k_data_list,
                          template_flag=False,
                          batch_size=16,
                          evaluate_type=0,
                          use_rule=True,
                          mode=args.mode,
                          filename=args.load_name)
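Example #5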
def gen_best_23_error():
    data_loader = DataLoader()
    Checkpoint.CHECKPOINT_DIR_NAME = args.checkpoint_dir_name
    checkpoint_path = os.path.join("./experiment", Checkpoint.CHECKPOINT_DIR_NAME, 'best')
    checkpoint = Checkpoint.load(checkpoint_path)

    seq2seq = checkpoint.model
    if args.cuda_use:
        seq2seq = seq2seq.cuda()

    seq2seq.eval()

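    # Dump the encoder's input embeddings for the downstream RL stage.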
    emb_model = seq2seq.encoder.embedding
    emb_np = emb_model.weight.cpu().data.numpy()
    np.save("./data/rl_train_data/emb.npy", emb_np)

    evaluator = Evaluator(vocab_dict=data_loader.vocab_dict,
                          vocab_list=data_loader.vocab_list,
                          decode_classes_dict=data_loader.decode_classes_dict,
                          decode_classes_list=data_loader.decode_classes_list,
                          loss=NLLLoss(),
                          cuda_use=args.cuda_use)

    evaluator.gen_rl_data(model=seq2seq,
                          data_loader=data_loader,
                          data_list=data_loader.math23k_train_list,
                          template_flag=False,
                          batch_size=16,
                          evaluate_type=0,
                          use_rule=False,
                          mode=args.mode,
                          filename=args.load_name)
Example #6
File: main.py Project: l-xin/hms
def test(args, test_dataset="test"):
    seq2seq, data_loader = create_model(args)
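    # model_only=True restores just the model weights; no optimizer state is needed for testing.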
    resume_checkpoint = Checkpoint.load(model_only=True)
    seq2seq.load_state_dict(resume_checkpoint.model)
    if args.use_cuda:
        seq2seq = seq2seq.cuda()

    evaluator = Evaluator(
        class_dict=data_loader.class_dict,
        class_list=data_loader.class_list,
        use_cuda=args.use_cuda
    )
    if test_dataset == "test":
        test_dataset = data_loader.test_list
    elif test_dataset == "train":
        test_dataset = data_loader.train_list
    seq2seq.eval()
    with torch.no_grad():
        test_temp_acc, test_ans_acc = evaluator.evaluate(
            model=seq2seq,
            data_loader=data_loader,
            data_list=test_dataset,
            template_flag=True,
            template_len=False,
            batch_size=1,
            beam_width=args.beam,
            test_log=args.test_log,
            print_probability=True
        )
    logging.info(f"temp_acc: {test_temp_acc}, ans_acc: {test_ans_acc}")
    return
Example #7
File: main.py Project: l-xin/hms
def init():
    args = get_args()
    if args.use_cuda:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.cuda_id
        if not torch.cuda.is_available():
            args.use_cuda = False

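    # Seed Python, PyTorch, and (if enabled) CUDA RNGs for reproducibility.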
    random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.use_cuda:
        torch.cuda.manual_seed(args.seed)

    logging.basicConfig(level=logging.INFO, format="[%(asctime)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S", filename=args.log)
    logging.info('\n' + '\n'.join([f"\t{'['+k+']':20}\t{v}" for k, v in dict(args._get_kwargs()).items()]))

    checkpoint_path = os.path.join("./experiment", args.checkpoint)
    if not os.path.exists(checkpoint_path):
        logging.info(f'create checkpoint directory {checkpoint_path} ...')
        os.makedirs(checkpoint_path)
    Checkpoint.set_ckpt_path(checkpoint_path)
    return args
Example #8
    def train(self,
              model,
              data_loader,
              batch_size,
              n_epoch,
              resume=False,
              optim_lr=1e-3,
              optim_weight_decay=1e-5,
              scheduler_step_size=60,
              scheduler_gamma=0.6):
        start_epoch = 1
        start_step = 0
        max_acc = 0
        self.optimizer = optim.Adam(model.parameters(),
                                    lr=optim_lr,
                                    weight_decay=optim_weight_decay)
        self.scheduler = optim.lr_scheduler.StepLR(
            self.optimizer,
            step_size=scheduler_step_size,
            gamma=scheduler_gamma)
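        # When resuming, restore the model weights, optimizer/scheduler state,
        # and the saved progress counters.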
        if resume:
            resume_checkpoint = Checkpoint.load(model_only=False)
            model.load_state_dict(resume_checkpoint.model)
            resume_optimizer = resume_checkpoint.optimizer
            resume_scheduler = resume_checkpoint.scheduler
            if resume_optimizer is not None:
                start_epoch = resume_checkpoint.epoch
                start_step = resume_checkpoint.step
                max_acc = resume_checkpoint.max_acc
                self.optimizer.load_state_dict(resume_optimizer)
                self.scheduler.load_state_dict(resume_scheduler)

        self._train_epoches(data_loader=data_loader,
                            model=model,
                            batch_size=batch_size,
                            start_epoch=start_epoch,
                            start_step=start_step,
                            max_acc=max_acc,
                            n_epoch=n_epoch)
        return
Example #9
def step_one_test():
    args = get_args()

    print(args)
    one_quest_test_list = read_data_json("./data/test.json")
    print(one_quest_test_list)
    #Checkpoint.CHECKPOINT_DIR_NAME = "0120_0030"
    Checkpoint.CHECKPOINT_DIR_NAME = args.checkpoint_dir_name
    checkpoint_path = os.path.join("./experiment",
                                   Checkpoint.CHECKPOINT_DIR_NAME, "best")
    print('-----', args.checkpoint_dir_name, checkpoint_path)
    checkpoint = Checkpoint.load(checkpoint_path)
    seq2seq = checkpoint.model
    if args.cuda_use:
        seq2seq = seq2seq.cuda()
    data_loader = DataLoader(args)
    seq2seq.eval()
    evaluator = Evaluator(vocab_dict=data_loader.vocab_dict,
                          vocab_list=data_loader.vocab_list,
                          decode_classes_dict=data_loader.decode_classes_dict,
                          decode_classes_list=data_loader.decode_classes_list,
                          loss=NLLLoss(),
                          cuda_use=args.cuda_use)
    name = args.run_flag
    test_temp_acc, test_ans_acc = evaluator.evaluate(
        model=seq2seq,
        data_loader=data_loader,
        data_list=one_quest_test_list,
        template_flag=False,  # TODO: changed to False here
        batch_size=64,
        evaluate_type=0,
        use_rule=False,
        mode=args.mode,
        post_flag=args.post_flag,
        name_save=name)
    print(test_temp_acc, test_ans_acc)
Example #10
def train_and_eval(hparams):
  model = get_model(hparams)
  optimizer = keras.optimizers.Adam(lr=hparams.learning_rate)
  loss_fn = keras.losses.SparseCategoricalCrossentropy()
  logger = Logger(hparams, optimizer)

  train_dataset = get_dataset(hparams, train=True)
  eval_dataset = get_dataset(hparams, train=False)

  checkpoint = Checkpoint(hparams, optimizer, model)

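  # Restore weights and optimizer state from a previous run, if a checkpoint exists.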
  checkpoint.restore()

  for epoch in range(hparams.epochs):

    start = time.time()

    for images, labels in train_dataset:
      loss, predictions = train_step(images, labels, model, optimizer, loss_fn)
      logger.log_progress(loss, labels, predictions, mode='train')

    elapse = time.time() - start

    logger.write_scalars(mode='train')

    for images, labels in eval_dataset:
      logger.write_images(images, mode='eval')
      loss, predictions = eval_step(images, labels, model, loss_fn)
      logger.log_progress(loss, labels, predictions, mode='eval')

    logger.write_scalars(mode='eval', elapse=elapse)

    logger.print_progress(epoch, elapse)

    if epoch % 5 == 0 or epoch == hparams.epochs - 1:
      checkpoint.save()

  tf.keras.models.save_model(model, filepath=hparams.save_model)
  print('model saved at %s' % hparams.save_model)
Example #11
    def __init__(self, env, render, config_info):
        self.env = env
        self._reset_env()
        self.render = render

        # Set seeds
        self.seed = 0
        env.seed(self.seed)
        torch.manual_seed(self.seed)
        np.random.seed(self.seed)

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        print(f"Device in use : {self.device}")

        # Define checkpoint
        checkpoint = Checkpoint(self.device, **config_info)

        # Create / load checkpoint dict
        (
            self.ckpt,
            self.path_ckpt_dict,
            self.path_ckpt,
            config,
        ) = checkpoint.manage_checkpoint()

        # Unroll useful parameters from config dict
        self.batch_size = config["training"]["batch_size"]
        self.max_timesteps = config["training"]["max_timesteps"]
        self.replay_size = config["training"]["replay_size"]
        self.start_temp = config["training"]["start_temperature"]
        self.final_temp = config["training"]["final_temperature"]
        self.decay_temp = config["training"]["decay_temperature"]
        self.gamma = config["training"]["gamma"]
        self.early_stopping = config["training"]["early_stopping"]
        self.update_frequency = config["training"]["update_frequency"]
        self.eval_frequency = config["training"]["eval_frequency"]

        # Define state and action dimension spaces
        state_dim = env.observation_space.shape[0]
        action_dim = env.action_space.n

        # Define Q-network and target Q-network
        self.network = DQN(state_dim, action_dim, **config["model"]).to(self.device)
        self.target_network = DQN(state_dim, action_dim, **config["model"]).to(
            self.device
        )

        # Loss and optimizer
        self.criterion = nn.MSELoss()
        lr = config["optimizer"]["learning_rate"]
        self.optimizer = optim.Adam(self.network.parameters(), lr=lr)

        # Load network's weight if resume training
        checkpoint.load_weights(
            self.ckpt, self.network, self.target_network, self.optimizer
        )

        # Initialize replay buffer
        self.replay_buffer = ReplayBuffer(self.replay_size)

        self.transition = namedtuple(
            "transition",
            field_names=["state", "action", "reward", "done", "next_state"],
        )
Example #12
    def _train_epoches(self, data_loader, model, batch_size, start_epoch,
                       start_step, n_epoch, mode, template_flag,
                       teacher_forcing_ratio, post_flag):
        print_loss_total = 0

        train_list = data_loader.math23k_train_list
        test_list = data_loader.math23k_test_list
        valid_list = data_loader.math23k_valid_list
        train_list = train_list + valid_list  # fold the validation split into the training data
        steps_per_epoch = len(train_list)/batch_size
        total_steps = steps_per_epoch * n_epoch

        step = start_step
        step_elapsed = 0

        threshold = [0] + [1] * 9  # note: computed but never used in this snippet

        max_ans_acc = 0  # best test answer accuracy so far; drives best-checkpoint saving

        for epoch in range(start_epoch, n_epoch + 1):
            epoch_loss_total = 0

            #marker if self.teacher_schedule:

            batch_generator = data_loader.get_batch(train_list, batch_size, True)
            print("shuffle")
            
            right_count = 0
            match = 0
            total_m = 0
            total_r = 0

            model.train(True)
            for batch_data_dict in batch_generator:
                step += 1
                step_elapsed += 1

                input_variables = batch_data_dict['batch_encode_pad_idx']
                input_lengths = batch_data_dict['batch_encode_len']
                target_variables = batch_data_dict['batch_decode_pad_idx']
                target_lengths = batch_data_dict['batch_decode_len']

                # Wrap batches as LongTensors (Variable is a no-op wrapper in
                # PyTorch >= 0.4, kept here as in the original code).
                input_variables = Variable(torch.LongTensor(input_variables))
                target_variables = Variable(torch.LongTensor(target_variables))

                if self.cuda_use:
                    input_variables = input_variables.cuda()
                    target_variables = target_variables.cuda()

                loss, com_list = self._train_batch(input_variables=input_variables,
                                                   input_lengths=input_lengths,
                                                   target_variables=target_variables,
                                                   target_lengths=target_lengths,
                                                   model=model,
                                                   template_flag=template_flag,
                                                   teacher_forcing_ratio=teacher_forcing_ratio,
                                                   mode=mode,
                                                   batch_size=batch_size,
                                                   post_flag=post_flag)


                right_count += com_list[0]
                total_r += batch_size

                match += com_list[1]
                total_m += com_list[2]

                print_loss_total += loss
                epoch_loss_total += loss

                if step % self.print_every == 0 and step_elapsed > self.print_every:
                    print_loss_avg = print_loss_total / self.print_every
                    print_loss_total = 0
                    print('step: %d, Progress: %d%%, Train %s: %.4f, Teacher_r: %.2f' % (
                        step,
                        step * 1.0 / total_steps * 100,
                        self.loss.name,
                        print_loss_avg,
                        teacher_forcing_ratio))


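            # End of epoch: evaluate template and answer accuracy on both the
            # training and test splits.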
            model.eval()
            train_temp_acc, train_ans_acc, pg_train_list = self.evaluator.evaluate(
                model=model,
                data_loader=data_loader,
                data_list=train_list,
                template_flag=True,
                batch_size=batch_size,
                evaluate_type=0,
                use_rule=False,
                mode=mode,
                post_flag=post_flag)
            test_temp_acc, test_ans_acc, pg_test_list = self.evaluator.evaluate(
                model=model,
                data_loader=data_loader,
                data_list=test_list,
                template_flag=True,
                batch_size=batch_size,
                evaluate_type=0,
                use_rule=False,
                mode=mode,
                post_flag=post_flag)
            self.train_acc_list.append((epoch, step, train_ans_acc))
            self.test_acc_list.append((epoch, step, test_ans_acc))
            self.loss_list.append((epoch, epoch_loss_total/steps_per_epoch))

            if test_ans_acc > max_ans_acc:
                max_ans_acc = test_ans_acc
                th_checkpoint = Checkpoint(model=model,
                                           optimizer=self.optimizer,
                                           epoch=epoch,
                                           step=step,
                                           train_acc_list=self.train_acc_list,
                                           test_acc_list=self.test_acc_list,
                                           loss_list=self.loss_list)
                th_checkpoint.save_according_name("./experiment", 'best')
                with open("pg_train_list.txt","w") as f:
                    json.dump(pg_train_list, f)

                with open("pg_test_list.txt","w") as f:
                    json.dump(pg_test_list, f)


            print("Epoch: %d, Step: %d, train_acc: %.2f, %.2f, test_acc: %.2f, %.2f"
                  % (epoch, step, train_temp_acc, train_ans_acc, test_temp_acc, test_ans_acc))
Example #13
    def _train_epoches(self, data_loader, model, batch_size, start_epoch,
                       start_step, max_acc, n_epoch):
        train_list = data_loader.train_list
        test_list = data_loader.test_list

        step = start_step
        print_loss_total = 0
        max_ans_acc = max_acc

        for epoch_index, epoch in enumerate(range(start_epoch, n_epoch + 1)):
            model.train()
            batch_generator = data_loader.get_batch(train_list,
                                                    batch_size,
                                                    template_flag=True)
            for batch_data_dict in batch_generator:
                step += 1
                input_variables = batch_data_dict['batch_span_encode_idx']
                input_lengths = batch_data_dict['batch_span_encode_len']
                span_length = batch_data_dict['batch_span_len']
                tree = batch_data_dict["batch_tree"]
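                # A batch carries hierarchical spans: token ids and lengths per
                # span, the number of spans per sample, and the gold equation tree.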

                input_variables = [
                    torch.LongTensor(input_variable)
                    for input_variable in input_variables
                ]
                input_lengths = [
                    torch.LongTensor(input_length)
                    for input_length in input_lengths
                ]
                span_length = torch.LongTensor(span_length)
                if self.use_cuda:
                    input_variables = [
                        input_variable.cuda()
                        for input_variable in input_variables
                    ]
                    input_lengths = [
                        input_length.cuda() for input_length in input_lengths
                    ]
                    span_length = span_length.cuda()

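                # Positions of numeric tokens, at the span level and within
                # each span's word sequence.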
                span_num_pos = batch_data_dict["batch_span_num_pos"]
                word_num_poses = batch_data_dict["batch_word_num_poses"]
                span_num_pos = torch.LongTensor(span_num_pos)
                word_num_poses = [
                    torch.LongTensor(word_num_pos)
                    for word_num_pos in word_num_poses
                ]
                if self.use_cuda:
                    span_num_pos = span_num_pos.cuda()
                    word_num_poses = [
                        word_num_pose.cuda()
                        for word_num_pose in word_num_poses
                    ]
                num_pos = (span_num_pos, word_num_poses)

                target_variables = batch_data_dict['batch_decode_idx']
                target_variables = torch.LongTensor(target_variables)
                if self.use_cuda:
                    target_variables = target_variables.cuda()

                loss = self._train_batch(input_variables=input_variables,
                                         num_pos=num_pos,
                                         input_lengths=input_lengths,
                                         span_length=span_length,
                                         target_variables=target_variables,
                                         tree=tree,
                                         model=model,
                                         batch_size=batch_size)

                print_loss_total += loss
                if step % self.print_every == 0:
                    print_loss_avg = print_loss_total / self.print_every
                    print_loss_total = 0
                    logging.info(
                        f'step: {step}, Train loss: {print_loss_avg:.4f}')
                    if self.use_cuda:
                        torch.cuda.empty_cache()
            self.scheduler.step()

            model.eval()
            with torch.no_grad():
                test_temp_acc, test_ans_acc = self.evaluator.evaluate(
                    model=model,
                    data_loader=data_loader,
                    data_list=test_list,
                    template_flag=True,
                    template_len=True,
                    batch_size=batch_size,
                )
                if epoch_index % self.test_train_every == 0:
                    train_temp_acc, train_ans_acc = self.evaluator.evaluate(
                        model=model,
                        data_loader=data_loader,
                        data_list=train_list,
                        template_flag=True,
                        template_len=True,
                        batch_size=batch_size,
                    )

                    logging.info(
                        f"Epoch: {epoch}, Step: {step}, test_acc: {test_temp_acc:.3f}, {test_ans_acc:.3f}, train_acc: {train_temp_acc:.3f}, {train_ans_acc:.3f}"
                    )
                else:
                    logging.info(
                        f"Epoch: {epoch}, Step: {step}, test_acc: {test_temp_acc:.3f}, {test_ans_acc:.3f}"
                    )

            if test_ans_acc > max_ans_acc:
                max_ans_acc = test_ans_acc
                logging.info("saving checkpoint ...")
                Checkpoint.save(epoch=epoch,
                                step=step,
                                max_acc=max_ans_acc,
                                model=model,
                                optimizer=self.optimizer,
                                scheduler=self.scheduler,
                                best=True)
            else:
                Checkpoint.save(epoch=epoch,
                                step=step,
                                max_acc=max_ans_acc,
                                model=model,
                                optimizer=self.optimizer,
                                scheduler=self.scheduler,
                                best=False)
        return