Example #1
def step_three():
    # Load the best checkpoint and evaluate it on the Math57K data list.
    data_loader = DataLoader(args)

    Checkpoint.CHECKPOINT_DIR_NAME = args.checkpoint_dir_name
    checkpoint_path = os.path.join("./experiment",
                                   Checkpoint.CHECKPOINT_DIR_NAME, "best")
    checkpoint = Checkpoint.load(checkpoint_path)

    seq2seq = checkpoint.model
    if args.cuda_use:
        seq2seq = seq2seq.cuda()

    seq2seq.eval()
    evaluator = Evaluator(vocab_dict=data_loader.vocab_dict,
                          vocab_list=data_loader.vocab_list,
                          decode_classes_dict=data_loader.decode_classes_dict,
                          decode_classes_list=data_loader.decode_classes_list,
                          loss=NLLLoss(),
                          cuda_use=args.cuda_use)
    test_temp_acc, test_ans_acc = evaluator.evaluate(
        model=seq2seq,
        data_loader=data_loader,
        data_list=data_loader.math57k_data_list,
        template_flag=False,
        batch_size=64,
        evaluate_type=0,
        use_rule=True,
        mode=args.mode)
    print(test_temp_acc, test_ans_acc)
Example #2
def step_one_test():
    # Load the best checkpoint and evaluate it on the Math23K test split.
    data_loader = DataLoader(args)

    Checkpoint.CHECKPOINT_DIR_NAME = args.checkpoint_dir_name
    checkpoint_path = os.path.join("./experiment",
                                   Checkpoint.CHECKPOINT_DIR_NAME, "best")
    checkpoint = Checkpoint.load(checkpoint_path)

    seq2seq = checkpoint.model
    if args.cuda_use:
        seq2seq = seq2seq.cuda()

    seq2seq.eval()
    evaluator = Evaluator(vocab_dict=data_loader.vocab_dict,
                          vocab_list=data_loader.vocab_list,
                          decode_classes_dict=data_loader.decode_classes_dict,
                          decode_classes_list=data_loader.decode_classes_list,
                          loss=NLLLoss(),
                          cuda_use=args.cuda_use)
    name = args.run_flag
    if name == 'test_23k':
        test_temp_acc, test_ans_acc = evaluator.evaluate(
            model=seq2seq,
            data_loader=data_loader,
            data_list=data_loader.math23k_test_list,
            template_flag=True,
            batch_size=64,
            evaluate_type=0,
            use_rule=False,
            mode=args.mode,
            post_flag=args.post_flag,
            name_save=name)
        # Print inside the branch: the accuracies are only defined when
        # run_flag == 'test_23k', so an unconditional print would raise NameError.
        print(test_temp_acc, test_ans_acc)
Example #3
File: main.py Project: l-xin/hms
def test(args, test_dataset="test"):
    seq2seq, data_loader = create_model(args)
    resume_checkpoint = Checkpoint.load(model_only=True)
    seq2seq.load_state_dict(resume_checkpoint.model)
    if args.use_cuda:
        seq2seq = seq2seq.cuda()

    evaluator = Evaluator(
        class_dict=data_loader.class_dict,
        class_list=data_loader.class_list,
        use_cuda=args.use_cuda
    )
    if test_dataset == "test":
        test_dataset = data_loader.test_list
    elif test_dataset == "train":
        test_dataset = data_loader.train_list
    seq2seq.eval()
    with torch.no_grad():
        test_temp_acc, test_ans_acc = evaluator.evaluate(
            model=seq2seq,
            data_loader=data_loader,
            data_list=test_dataset,
            template_flag=True,
            template_len=False,
            batch_size=1,
            beam_width=args.beam,
            test_log=args.test_log,
            print_probability=True
        )
    logging.info(f"temp_acc: {test_temp_acc}, ans_acc: {test_ans_acc}")
    return
Example #4
def test(net, test_loader, epoch):
    net.eval()
    test_step = len(test_loader)
    print('\nEvaluating...')
    with torch.no_grad():
        evaluator = Evaluator()
        for i, sample in enumerate(test_loader):
            # Move every tensor in the sample dict onto the GPU.
            for key in sample:
                sample[key] = sample[key].cuda()
            output = net(sample)
            evaluator.add(output, sample)
            if (i + 1) % 100 == 0:
                print('Val Step [{}/{}]'.format(i + 1, test_step))

        results = evaluator.evaluate()
        print('Epoch {}/{}'.format(epoch + 1, args.epoch))
        print(
            '|  L.Collar  |  R.Collar  |  L.Sleeve  |  R.Sleeve  |   L.Waist  |   R.Waist  |    L.Hem   |   R.Hem    |     ALL    |'
        )
        print(
            '|   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |'
            .format(results['lm_dist'][0], results['lm_dist'][1],
                    results['lm_dist'][2], results['lm_dist'][3],
                    results['lm_dist'][4], results['lm_dist'][5],
                    results['lm_dist'][6], results['lm_dist'][7],
                    results['lm_dist_all']))
Example #5
def evaluate(split='valid'):
    # Look up `valid_data` or `test_data` from the enclosing scope by name.
    y_true = eval(split + '_data')
    model.eval()
    y_score, _, _ = model(train_tensor)
    y_score.detach_()
    y_score = y_score.squeeze(0)
    # Zero out items already seen in training so they cannot be recommended.
    y_score[train_data.row, train_data.col] = 0
    _, rec_items = torch.topk(y_score, args.N, dim=1)
    run = sort2query(rec_items[:, 0:args.N])
    test = csr2test(y_true.tocsr())
    evaluator = Evaluator({'recall', 'map_cut'})
    evaluator.evaluate(run, test)
    result = evaluator.show([
        'recall_5', 'recall_10', 'recall_15', 'recall_20', 'map_cut_5',
        'map_cut_10', 'map_cut_15', 'map_cut_20'
    ])
    print(result)
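Examples #5 and #10 both rely on two helpers, sort2query and csr2test, that are not shown on this page. Below is a minimal sketch of what they plausibly do, assuming the Evaluator consumes pytrec_eval-style {query: {doc: score}} dictionaries; the exact layout and string keys are assumptions, not the projects' confirmed code.

def sort2query(rec_items):
    # rec_items: (n_users, N) tensor of item indices, best-ranked first.
    # Assumed output: {user_id: {item_id: score}}, scoring earlier ranks higher.
    n_users, n = rec_items.shape
    return {
        str(u): {str(rec_items[u, r].item()): float(n - r) for r in range(n)}
        for u in range(n_users)
    }

def csr2test(matrix):
    # matrix: scipy.sparse CSR of held-out user-item interactions.
    # Assumed output: {user_id: {item_id: 1}} binary relevance judgements.
    coo = matrix.tocoo()
    test = {}
    for u, i in zip(coo.row, coo.col):
        test.setdefault(str(u), {})[str(i)] = 1
    return test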
Example #6
def test(net, test_loader, epoch):
    net.eval()
    test_step = len(test_loader)
    print('\nEvaluating...')
    with torch.no_grad():
        evaluator = Evaluator()
        for i, sample in enumerate(test_loader):
            # Entries whose key starts with 'D_' stay on the CPU.
            for key in sample:
                if key[0:2] != 'D_':
                    sample[key] = sample[key].cuda()
            output = net(sample)
            evaluator.add(output, sample)
            if (i + 1) % 100 == 0:
                print('Val Step [{}/{}]'.format(i + 1, test_step))

        results = evaluator.evaluate()
        print('Epoch {}/{}'.format(epoch + 1, args.epoch))
        print(
            '|  L.Collar  |  R.Collar  |  L.Sleeve  |  R.Sleeve  |   L.Waist  |   R.Waist  |    L.Hem   |   R.Hem    |     ALL    |'
        )
        print(
            '|   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |'
            .format(results['lm_dist'][0], results['lm_dist'][1],
                    results['lm_dist'][2], results['lm_dist'][3],
                    results['lm_dist'][4], results['lm_dist'][5],
                    results['lm_dist'][6], results['lm_dist'][7],
                    results['lm_dist_all']))
        # Append the results to a log file named after the hyper-parameters.
        log_name = ('results_lr_%.4f_base_%d_de_%d_g_%.2f.txt' %
                    (args.learning_rate, args.base_epoch,
                     args.decay_epoch, args.gamma))
        with open(log_name, 'a') as file:
            file.write('Epoch {}\n'.format(args.base_epoch + epoch + 1))
            file.write(
                '|  L.Collar  |  R.Collar  |  L.Sleeve  |  R.Sleeve  |   L.Waist  |   R.Waist  |    L.Hem   |   R.Hem    |     ALL    |\n'
            )
            file.write(
                '|   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |\n\n'
                .format(results['lm_dist'][0], results['lm_dist'][1],
                        results['lm_dist'][2], results['lm_dist'][3],
                        results['lm_dist'][4], results['lm_dist'][5],
                        results['lm_dist'][6], results['lm_dist'][7],
                        results['lm_dist_all']))
Example #7
def test(net, test_loader, epoch):
    net.eval()
    test_step = len(test_loader)
    print('\nEvaluating...')
    with torch.no_grad():
        evaluator = Evaluator()
        for i, (sample, img) in enumerate(test_loader):
            # Move everything except the image name onto the GPU.
            for key in sample:
                if key != 'image_name':
                    sample[key] = sample[key].cuda()
            output = net(sample)
            evaluator.add(output, sample, img)
            if (i + 1) % 100 == 0:
                print('Val Step [{}/{}]'.format(i + 1, test_step))

        results = evaluator.evaluate()
        print('Epoch {}/{}'.format(epoch + 1, args.epoch))
        print(
            '|  L.Collar  |  R.Collar  |  L.Sleeve  |  R.Sleeve  |   L.Waist  |   R.Waist  |    L.Hem   |   R.Hem    |     ALL    |'
        )
        print(
            '|   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |   {:.5f}  |'
            .format(results['lm_dist'][0], results['lm_dist'][1],
                    results['lm_dist'][2], results['lm_dist'][3],
                    results['lm_dist'][4], results['lm_dist'][5],
                    results['lm_dist'][6], results['lm_dist'][7],
                    results['lm_dist_all']))
Example #8
def test(net, test_loader, epoch):
    net.eval()
    test_step = len(test_loader)
    print('\nEvaluating...')
    with torch.no_grad():
        evaluator = Evaluator()
        pbar2 = tqdm(test_loader)
        for i, sample in enumerate(pbar2):
            iter_start_time = time.time()
            for key in sample:
                sample[key] = sample[key].cuda()

            output = net(sample)
            evaluator.add(output, sample)
            t = time.time() - iter_start_time
            if (i + 1) % 100 == 0:
                tqdm.write('Val Step [{}/{}],  Time:{:.3f}'.format(
                    i + 1, test_step, t))

        results = evaluator.evaluate()
        print('Epoch {}/{}'.format(epoch + 1, args.epoch))
        print('lm_dist_all: {:.5f} '.format(results['lm_dist_all']))
Example #9
if __name__ == "__main__":
    norm_layer = torch.nn.modules.instancenorm.InstanceNorm2d
    model = G(3, 3, 64, norm_layer=norm_layer, n_blocks=9)
    device = 0

    weights = "./weights/cyclegan/stb_net_G.pth"

    cpm2d = './weights/cpm/stb_hpm2d.pth'
    cpm3d = './weights/cpm/stb_hpm3d.pth'
    evaluate = Evaluator(model, weights, cpm2d, cpm3d, device)
    opt = edict()
    opt.dataroot = "./dataset/stb-dataset/test"
    opt.isTrain = False
    dataset = STBdataset(opt)
    for i in tqdm(range(len(dataset))):
        sample = dataset[i]
        evaluate.feed(sample['xyz'], sample['A']*255.0)
    results = evaluate.evaluate()
    print(f"STB results: {results}")

    opt.dataroot = "./dataset/rhd-dataset/test"
    weights = "./weights/cyclegan/rhd_net_G.pth"
    cpm2d = "./weights/cpm/rhd_hpm2d.pth"
    dataset = RHDdataset(opt)
    evaluate = Evaluator(model, weights, cpm2d, cpm3d, device)
    for i in tqdm(range(len(dataset))):
        sample = dataset[i]
        evaluate.feed(sample['xyz'], sample['A']*255.0)
    results = evaluate.evaluate()
    print(f"rhd results: {results}")
Example #10
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    for epoch in range(1, args.maxiter + 1):
        train(epoch)
        # test()

    model.eval()
    score, _, _ = model(Rtensor)
    score = score.squeeze(0)
    score[R.row, R.col] = 0
    _, idx = torch.sort(score, 1, True)

    run = sort2query(idx[:, 0:args.N])
    test = csr2test(T.tocsr())
    evaluator = Evaluator({'recall'})
    evaluator.evaluate(run, test)
    result = evaluator.show(
        ['recall_5', 'recall_10', 'recall_15', 'recall_20'])
    print(result)
    line = 'cVAE\t{}\t{}\t{}\t{}\t0'.format(args.data, args.alpha, args.beta, len(args.layer))
    for _, value in result.items():
        line += '\t{:.5f}'.format(value)
    line += '\r\n'
    with open('result', 'a') as file:
        file.write(line)

    if args.save:
        name = 'cvae' if args.rating else 'fvae'
        path = directory + '/model/' + name
        for l in args.layer:
Example #11
    # For parallel evaluation
    print('[+] Initializing evaluators')
    evaluators = Evaluator(config)
    # [Evaluator(config) for _ in range(2 * N)]

    # First evaluation (sequential)
    # delayed_functions = []
    # for i in range(2 * N):
    #     sf = skill_factor[i]
    #     delayed_functions.append(delayed(evaluators[i].evaluate)(population[i], sf))
    # fitnesses = Parallel(n_jobs=cpu_count())(delayed_functions)
    # for i in range(2 * N):
    #     sf = skill_factor[i]
    #     factorial_cost[i, sf] = fitnesses[i]
    # scalar_fitness = calculate_scalar_fitness(factorial_cost)
    fitnesses = [evaluators.evaluate(x, 0) for x in population]

    # Evolve
    #     iterator = trange(T)
    iterator = trange(10)
    for t in iterator:

        # permute current population
        permutation_index = np.random.permutation(N)
        population[:N] = population[:N][permutation_index]
        # skill_factor[:N] = skill_factor[:N][permutation_index]
        # factorial_cost[:N] = factorial_cost[:N][permutation_index]
        # factorial_cost[N:] = np.inf

        # As we consider the whole population as parents, we don't sample P^{s}
        if t % TrInt == 0:
Example #12
class SupervisedTrainer(object):
    def __init__(self, class_dict, class_list, use_cuda):
        self.test_train_every = 10
        self.print_every = 30
        self.use_cuda = use_cuda

        self.pad_idx_in_class = class_dict['PAD_token']

        loss_weight = torch.ones(len(class_dict))
        loss_weight[self.pad_idx_in_class] = 0
        self.loss = nn.NLLLoss(weight=loss_weight, reduction="sum")
        if use_cuda:
            self.loss = self.loss.cuda()

        self.evaluator = Evaluator(class_dict=class_dict,
                                   class_list=class_list,
                                   use_cuda=use_cuda)
        return

    def _train_batch(self, input_variables, num_pos, input_lengths,
                     span_length, target_variables, tree, model, batch_size):
        decoder_outputs, _, _ = model(input_variable=input_variables,
                                      num_pos=num_pos,
                                      input_lengths=input_lengths,
                                      span_length=span_length,
                                      target_variable=target_variables,
                                      tree=tree)

        # Recompute the batch size from the data; the last batch may be smaller.
        batch_size = span_length.size(0)

        # loss
        loss = 0
        for step, step_output in enumerate(decoder_outputs):
            loss += self.loss(step_output.contiguous().view(batch_size, -1),
                              target_variables[:, step].view(-1))

        total_target_length = (target_variables !=
                               self.pad_idx_in_class).sum().item()
        loss = loss / total_target_length

        model.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def _train_epoches(self, data_loader, model, batch_size, start_epoch,
                       start_step, max_acc, n_epoch):
        train_list = data_loader.train_list
        test_list = data_loader.test_list

        step = start_step
        print_loss_total = 0
        max_ans_acc = max_acc

        for epoch_index, epoch in enumerate(range(start_epoch, n_epoch + 1)):
            model.train()
            batch_generator = data_loader.get_batch(train_list,
                                                    batch_size,
                                                    template_flag=True)
            for batch_data_dict in batch_generator:
                step += 1
                input_variables = batch_data_dict['batch_span_encode_idx']
                input_lengths = batch_data_dict['batch_span_encode_len']
                span_length = batch_data_dict['batch_span_len']
                tree = batch_data_dict["batch_tree"]

                input_variables = [
                    torch.LongTensor(input_variable)
                    for input_variable in input_variables
                ]
                input_lengths = [
                    torch.LongTensor(input_length)
                    for input_length in input_lengths
                ]
                span_length = torch.LongTensor(span_length)
                if self.use_cuda:
                    input_variables = [
                        input_variable.cuda()
                        for input_variable in input_variables
                    ]
                    input_lengths = [
                        input_length.cuda() for input_length in input_lengths
                    ]
                    span_length = span_length.cuda()

                span_num_pos = batch_data_dict["batch_span_num_pos"]
                word_num_poses = batch_data_dict["batch_word_num_poses"]
                span_num_pos = torch.LongTensor(span_num_pos)
                word_num_poses = [
                    torch.LongTensor(word_num_pos)
                    for word_num_pos in word_num_poses
                ]
                if self.use_cuda:
                    span_num_pos = span_num_pos.cuda()
                    word_num_poses = [
                        word_num_pose.cuda()
                        for word_num_pose in word_num_poses
                    ]
                num_pos = (span_num_pos, word_num_poses)

                target_variables = batch_data_dict['batch_decode_idx']
                target_variables = torch.LongTensor(target_variables)
                if self.use_cuda:
                    target_variables = target_variables.cuda()

                loss = self._train_batch(input_variables=input_variables,
                                         num_pos=num_pos,
                                         input_lengths=input_lengths,
                                         span_length=span_length,
                                         target_variables=target_variables,
                                         tree=tree,
                                         model=model,
                                         batch_size=batch_size)

                print_loss_total += loss
                if step % self.print_every == 0:
                    print_loss_avg = print_loss_total / self.print_every
                    print_loss_total = 0
                    logging.info(
                        f'step: {step}, Train loss: {print_loss_avg:.4f}')
                    if self.use_cuda:
                        torch.cuda.empty_cache()
            self.scheduler.step()

            model.eval()
            with torch.no_grad():
                test_temp_acc, test_ans_acc = self.evaluator.evaluate(
                    model=model,
                    data_loader=data_loader,
                    data_list=test_list,
                    template_flag=True,
                    template_len=True,
                    batch_size=batch_size,
                )
                if epoch_index % self.test_train_every == 0:
                    train_temp_acc, train_ans_acc = self.evaluator.evaluate(
                        model=model,
                        data_loader=data_loader,
                        data_list=train_list,
                        template_flag=True,
                        template_len=True,
                        batch_size=batch_size,
                    )

                    logging.info(
                        f"Epoch: {epoch}, Step: {step}, test_acc: {test_temp_acc:.3f}, {test_ans_acc:.3f}, train_acc: {train_temp_acc:.3f}, {train_ans_acc:.3f}"
                    )
                else:
                    logging.info(
                        f"Epoch: {epoch}, Step: {step}, test_acc: {test_temp_acc:.3f}, {test_ans_acc:.3f}"
                    )

            is_best = test_ans_acc > max_ans_acc
            if is_best:
                max_ans_acc = test_ans_acc
                logging.info("saving checkpoint ...")
            Checkpoint.save(epoch=epoch,
                            step=step,
                            max_acc=max_ans_acc,
                            model=model,
                            optimizer=self.optimizer,
                            scheduler=self.scheduler,
                            best=is_best)
        return

    def train(self,
              model,
              data_loader,
              batch_size,
              n_epoch,
              resume=False,
              optim_lr=1e-3,
              optim_weight_decay=1e-5,
              scheduler_step_size=60,
              scheduler_gamma=0.6):
        start_epoch = 1
        start_step = 0
        max_acc = 0
        self.optimizer = optim.Adam(model.parameters(),
                                    lr=optim_lr,
                                    weight_decay=optim_weight_decay)
        self.scheduler = optim.lr_scheduler.StepLR(
            self.optimizer,
            step_size=scheduler_step_size,
            gamma=scheduler_gamma)
        if resume:
            resume_checkpoint = Checkpoint.load(model_only=False)
            model.load_state_dict(resume_checkpoint.model)
            resume_optimizer = resume_checkpoint.optimizer
            resume_scheduler = resume_checkpoint.scheduler
            if resume_optimizer is not None:
                start_epoch = resume_checkpoint.epoch
                start_step = resume_checkpoint.step
                max_acc = resume_checkpoint.max_acc
                self.optimizer.load_state_dict(resume_optimizer)
                self.scheduler.load_state_dict(resume_scheduler)

        self._train_epoches(data_loader=data_loader,
                            model=model,
                            batch_size=batch_size,
                            start_epoch=start_epoch,
                            start_step=start_step,
                            max_acc=max_acc,
                            n_epoch=n_epoch)
        return
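A minimal driver for SupervisedTrainer might look like the sketch below; create_model is borrowed from Example #3, and the args attribute names are assumptions about the surrounding project rather than confirmed API.

seq2seq, data_loader = create_model(args)
trainer = SupervisedTrainer(class_dict=data_loader.class_dict,
                            class_list=data_loader.class_list,
                            use_cuda=args.use_cuda)
trainer.train(model=seq2seq,
              data_loader=data_loader,
              batch_size=args.batch_size,  # assumed arg name
              n_epoch=args.epoch,          # assumed arg name
              resume=args.resume)          # assumed arg name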