) as f:
        ret = []
        # the first two lines of the attribute list are headers, skip them
        f.readline()
        f.readline()
        for line in f:
            line = line.split(' ')
            # drop trailing whitespace tokens until the last token is the
            # numeric attribute type
            while not line[-1].strip().isdigit():
                line = line[:-1]
            # everything before the type digit is the (possibly multi-word) attribute name
            ret.append([' '.join(line[:-1]).strip(), int(line[-1])])
    # build a DataFrame of attribute names/types, indexed as attr_0 ... attr_999
    attr_type = pd.DataFrame(ret, columns=['attr_name', 'type'])
    attr_type['attr_index'] = ['attr_' + str(i) for i in range(1000)]
    attr_type.set_index('attr_index', inplace=True)
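    # NOTE: the parsing above assumes a DeepFashion-style attribute list file:
    # a count line, a header line, then one "<attribute name>  <attribute type>"
    # row per attribute with the type given as a single integer. A differently
    # formatted list would need the split/strip logic adapted.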

    # inference pass: no gradients, network in eval mode
    with torch.no_grad():
        net.eval()
        evaluator = const.EVALUATOR()
        cat_pred_lines = []
        for sample_idx, sample in enumerate(inf_dataloader):
            # move every tensor in the batch dict to the configured device
            for key in sample:
                sample[key] = sample[key].to(const.device)
            # forward pass
            output = net(sample)

            # all_lines = evaluator.add(output, sample)

            # gt = all_lines[0][0]
            # pred = all_lines[0][1]
            # correct = all_lines[0][-1]

            # category type of the first sample in the batch
            category_type = sample['category_type'][0].cpu().numpy()
            # lm_size = int(output['lm_pos_map'].shape[2])
                        writer.add_figure(
                            'heatmaps/{}'.format(const.lm2name[i]), fig, step)

                # save (and optionally evaluate) the model at the last step
                if (sample_idx + 1) == total_step:
                    # alternative trigger: save every 5 steps
                    # if (sample_idx + 1) % 5 == 0:
                    print('\nSaving Model....')
                    # store the current global step in the model's buffers,
                    # then write the weights to disk
                    net.set_buffer('step', step)
                    torch.save(net.state_dict(), 'models/' + const.MODEL_NAME)
                    print('OK.')
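                    # the checkpoint holds only the state dict; a later run
                    # would restore it with something along these lines (sketch):
                    #   net.load_state_dict(
                    #       torch.load('models/' + const.MODEL_NAME,
                    #                  map_location=const.device))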
                    if const.VAL_WHILE_TRAIN:
                        print('Now evaluating...')
                        with torch.no_grad():
                            net.eval()
                            # top-1/2/3 metrics for both categories and attributes
                            evaluator = const.EVALUATOR(
                                category_topk=(1, 2, 3), attr_topk=(1, 2, 3))
                            for j, sample in enumerate(val_dataloader):
                                # move samples to GPU/CPU
                                for key in sample:
                                    sample[key] = sample[key].to(const.device)
                                # perform inference
                                output = net(sample)
                                # add result to evaluator
                                evaluator.add(output, sample)

                                if (j + 1) % 100 == 0:
                                    print('Val Step [{}/{}]'.format(
                                        j + 1, val_step))

                            # get result from evaluator