Example #1
    def __init__(self, __C):
        self.__C = __C

        print('Loading training set........')
        self.dataset = DatasetLoader(__C).DataSet()

        # If evaluation is triggered after every epoch,
        # create a new config with RUN_MODE = 'val'
        self.dataset_eval = None
        if __C.EVAL_EVERY_EPOCH:
            __C_eval = copy.deepcopy(__C)
            setattr(__C_eval, 'RUN_MODE', 'val')

            print('Loading validation set for per-epoch evaluation........')
            self.dataset_eval = DatasetLoader(__C_eval).DataSet()
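The key pattern here is cloning the config instead of mutating it, so the training run keeps RUN_MODE = 'train'. A minimal, self-contained sketch of that pattern (the Cfg class below is a hypothetical stand-in for openvqa's config object):

import copy

class Cfg:
    # hypothetical stand-in for the openvqa config object
    RUN_MODE = 'train'
    EVAL_EVERY_EPOCH = True

__C = Cfg()
__C_eval = copy.deepcopy(__C)           # independent copy; the training config is untouched
setattr(__C_eval, 'RUN_MODE', 'val')    # same effect as __C_eval.RUN_MODE = 'val'
print(__C.RUN_MODE, __C_eval.RUN_MODE)  # -> train val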
Example #2
File: exec.py Project: markvasin/openvqa
    def __init__(self, __C, param_dict):
        self.__C = __C

        # if self.__C.WANDB:
        #     wandb.init(project="openvqa-gqa", config=param_dict)

        print('Loading dataset........')
        self.dataset = DatasetLoader(__C).DataSet()

        # If evaluation is triggered after every epoch,
        # create a new config with RUN_MODE = 'val'
        self.dataset_eval = None
        if __C.EVAL_EVERY_EPOCH:
            __C_eval = copy.deepcopy(__C)
            setattr(__C_eval, 'RUN_MODE', 'val')

            print('Loading validation set for per-epoch evaluation........')
            self.dataset_eval = DatasetLoader(__C_eval).DataSet()
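The commented-out lines above hint at optional Weights & Biases logging. A hedged sketch of that toggle as a standalone helper (wandb.init with project and config is the standard W&B call; the WANDB flag and project name are taken from the comment, not from tested code):

import wandb

def init_logging(__C, param_dict):
    # Start a W&B run only when the config enables it
    if getattr(__C, 'WANDB', False):
        wandb.init(project="openvqa-gqa", config=param_dict)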
Example #3
    def __init__(self, __C):

        #self._save_path = save_path
        #train_transforms = self.build_train_transform(distort_color, resize_scale)
        #train_dataset = datasets.ImageFolder(self.train_path, train_transforms)

        print('Loading dataset........')

        self.train_set = DatasetLoader(__C).DataSet()
        __C_eval = copy.deepcopy(__C)
        setattr(__C_eval, 'RUN_MODE', 'val')
        self.val_set = DatasetLoader(__C_eval).DataSet()
        self.test_set = self.val_set
        """
        self.train = Data.DataLoader(
                self.train_set,
                batch_size=__C.EVAL_BATCH_SIZE,
                shuffle=False,
                num_workers=__C.NUM_WORKERS,
                pin_memory=__C.PIN_MEM
            )
        self.valid = Data.DataLoader(
                self.val_set,
                batch_size=__C.EVAL_BATCH_SIZE,
                shuffle=True,
                num_workers=__C.NUM_WORKERS,
                pin_memory=__C.PIN_MEM
            )
        """

        #self.test = self.valid 

        self.ans_size = self.train_set.ans_size
        self.data_size = self.train_set.data_size
        self.val_data_size = self.val_set.data_size
        self.token_size = self.train_set.token_size
        self.pretrained_emb = self.train_set.pretrained_emb 
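The commented-out loaders in this snippet use EVAL_BATCH_SIZE for training and swap the usual shuffle settings (shuffle=False for training, True for validation). A corrected sketch of the same idea, assuming Data is torch.utils.data (as in Example #4) and that the config exposes a BATCH_SIZE key for training:

import torch.utils.data as Data

def build_loaders(self, __C):
    # Training loader: reshuffle samples every epoch
    self.train = Data.DataLoader(
        self.train_set,
        batch_size=__C.BATCH_SIZE,       # assumed training batch-size key
        shuffle=True,
        num_workers=__C.NUM_WORKERS,
        pin_memory=__C.PIN_MEM
    )
    # Validation loader: fixed order for reproducible evaluation
    self.valid = Data.DataLoader(
        self.val_set,
        batch_size=__C.EVAL_BATCH_SIZE,
        shuffle=False,
        num_workers=__C.NUM_WORKERS,
        pin_memory=__C.PIN_MEM
    )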
Example #4
def visualise_engine(__C):
    # Load parameters
    dataset = DatasetLoader(__C).DataSet()
    if __C.CKPT_PATH is not None:
        print('Warning: you are now using CKPT_PATH args, '
              'CKPT_VERSION and CKPT_EPOCH will not work')

        path = __C.CKPT_PATH
    else:
        path = __C.CKPTS_PATH + \
               '/ckpt_' + __C.CKPT_VERSION + \
               '/epoch' + str(__C.CKPT_EPOCH) + '.pkl'

    print('Loading ckpt from: {}'.format(path))
    state_dict = torch.load(path)['state_dict']
    print('Finish!')

    if __C.N_GPU > 1:
        state_dict = ckpt_proc(state_dict)

    # Store the prediction list
    # qid_list = [ques['question_id'] for ques in dataset.ques_list]
    ans_ix_list = []
    pred_list = []

    data_size = dataset.data_size
    token_size = dataset.token_size
    ans_size = dataset.ans_size
    pretrained_emb = dataset.pretrained_emb

    net = ModelLoader(__C).Net(
        __C,
        pretrained_emb,
        token_size,
        ans_size,
        dataset.token_to_ix
    )
    net.cuda()
    net.eval()

    if __C.N_GPU > 1:
        net = nn.DataParallel(net, device_ids=__C.DEVICES)

    net.load_state_dict(state_dict)

    batch_size = 1
    dataloader = Data.DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=__C.NUM_WORKERS,
        pin_memory=__C.PIN_MEM
    )

    for step, (
            frcn_feat_iter,
            grid_feat_iter,
            bbox_feat_iter,
            ques_ix_iter,
            ans_iter,
            image_id,
            question,
            words,
            target_ans
    ) in enumerate(dataloader):
        print("\rEvaluation: [step %4d/%4d]" % (
            step,
            int(data_size / batch_size),  # the loader above uses batch_size=1, not EVAL_BATCH_SIZE
        ), end='          ')

        frcn_feat_iter = frcn_feat_iter.cuda()
        grid_feat_iter = grid_feat_iter.cuda()
        bbox_feat_iter = bbox_feat_iter.cuda()
        ques_ix_iter = ques_ix_iter.cuda()

        pred, img_attention_map, txt_attention_map = net(
            frcn_feat_iter,
            grid_feat_iter,
            bbox_feat_iter,
            ques_ix_iter
        )
        img_attention_map = img_attention_map[:, :, :, 1:]
        txt_attention_map = txt_attention_map[:, :, :, 1:len(words) + 1]
        pred_np = pred.cpu().data.numpy()
        pred_argmax = np.argmax(pred_np, axis=1)
        ans = dataset.ix_to_ans[pred_argmax[0]]
        plt.interactive(False)
        visualise_img(question['image_filename'][0], question['question'][0], img_attention_map.cpu().data.numpy()[0],
                      ans, target_ans[0])
        visualise_txt(words, txt_attention_map.cpu().data.numpy()[0])
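ckpt_proc is not shown in this example. A plausible sketch, assuming its job is to reconcile the 'module.' prefix that nn.DataParallel adds to parameter names (the real openvqa helper may differ):

def ckpt_proc(state_dict):
    # nn.DataParallel registers the wrapped model under the 'module.'
    # namespace, so every key in a DataParallel net's state_dict carries
    # that prefix. This sketch adds the prefix to a checkpoint saved from
    # an unwrapped model so it can load into the DataParallel-wrapped net
    # built above (an assumption, not the verified implementation).
    return {'module.' + key: value for key, value in state_dict.items()}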