    def predict(self, url, feat_name, get_features=False):
        with torch.no_grad():
            detectron_features = get_detectron_features([url],
                                                        self.detection_model,
                                                        False, feat_name,
                                                        self.cuda_device)
            # returns a single-element list
            detectron_features = detectron_features[0]

            sample = Sample()
            sample.dataset_name = "coco"
            sample.dataset_type = "test"
            sample.image_feature_0 = detectron_features
            sample.answers = torch.zeros((5, 10), dtype=torch.long)

            sample_list = SampleList([sample])
            sample_list = sample_list.to(self.cuda_device)

            tokens = self.caption_model(sample_list)["captions"]

        gc.collect()
        torch.cuda.empty_cache()

        if not get_features:
            return tokens
        else:
            return tokens, detectron_features
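
Every example in this listing follows the same pattern: wrap per-image tensors in a Sample, batch them with SampleList, move the batch to a device, and feed it to the model. Below is a minimal standalone sketch of that pattern; the pythia import path, the 100x2048 feature shape, and the dummy answers tensor are assumptions carried over from the snippets here, not API requirements.

import torch
from pythia.common.sample import Sample, SampleList

def build_caption_batch(image_features, device="cuda"):
    # wrap one image's features in a Sample; dataset_name/dataset_type
    # tell the model which processors and split conventions to use
    sample = Sample()
    sample.dataset_name = "coco"
    sample.dataset_type = "test"
    sample.image_feature_0 = image_features
    # dummy answers tensor; at inference time only its shape matters
    sample.answers = torch.zeros((5, 10), dtype=torch.long)
    # SampleList batches Samples, and .to() moves all tensors at once
    return SampleList([sample]).to(device)

# usage sketch:
# tokens = caption_model(build_caption_batch(torch.randn(100, 2048)))["captions"]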
Example #2
    def forward(self, images, image_scales, transitions=None):
        feature_list = self.encoder(images, image_scales)
        image_features = feature_list[0]
        assert len(feature_list) == 1, \
            'current model only supports batch size 1'

        sample = Sample()
        sample.dataset_name = "coco"
        sample.dataset_type = "test"
        sample.image_feature_0 = image_features
        # answers appears to act as a placeholder here,
        # so its exact size does not matter
        sample.answers = torch.zeros((1, 10), dtype=torch.long)
        sample_list = SampleList([sample])
        sample_list = sample_list.to(device)
        if transitions is not None:
            sample_list.transitions = transitions

        output = self.decoder(sample_list)
        tokens = output['captions']
        caption = tokens.tolist()[0]
        caption = self.decoder.caption_processor(caption)['caption']

        return caption
Example #3
    def test_nucleus_sampling(self):
        vocab = text_utils.VocabFromText(self.VOCAB_EXAMPLE_SENTENCES)

        model_config = self.config.model_attributes.butd
        model = TestDecoderModel(model_config, vocab)
        model.build()
        model.to("cuda")
        model.eval()

        sample = Sample()
        sample.dataset_name = "coco"
        sample.dataset_type = "test"
        sample.image_feature_0 = torch.randn(100, 2048)
        sample.answers = torch.zeros((5, 10), dtype=torch.long)
        sample_list = SampleList([sample])

        tokens = model(sample_list)["captions"]

        # these are expected tokens for sum_threshold = 0.5
        expected_tokens = [
            1.0000e+00, 2.9140e+03, 5.9210e+03, 2.2040e+03, 5.0550e+03,
            9.2240e+03, 4.5120e+03, 1.8200e+02, 3.6490e+03, 6.4090e+03,
            2.0000e+00
        ]

        self.assertEqual(tokens[0].tolist(), expected_tokens)
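
The test above exercises BUTD decoding with nucleus (top-p) sampling at sum_threshold = 0.5. As background, here is a self-contained sketch of the top-p filtering step itself, written in plain PyTorch rather than Pythia's decoder; the sum_threshold name mirrors the test, the rest is generic.

import torch

def nucleus_filter(logits, sum_threshold=0.5):
    # keep the smallest set of tokens whose cumulative probability
    # exceeds sum_threshold; mask everything else to -inf
    probs = torch.softmax(logits, dim=-1)
    sorted_probs, sorted_idx = torch.sort(probs, descending=True, dim=-1)
    cumulative = torch.cumsum(sorted_probs, dim=-1)
    remove = cumulative > sum_threshold
    # shift right so the token that crosses the threshold is kept
    remove[..., 1:] = remove[..., :-1].clone()
    remove[..., 0] = False
    # map the sorted-order mask back to vocabulary order
    mask = torch.zeros_like(remove).scatter(-1, sorted_idx, remove)
    return logits.masked_fill(mask, float("-inf"))

# usage sketch:
# next_token = torch.multinomial(
#     torch.softmax(nucleus_filter(logits), dim=-1), num_samples=1)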
Example #4
    def predict(self, img_paths, qud):
        """
        We enable batch prediction here
        :return:
        """
        with torch.no_grad():
            detectron_features = self.get_detectron_features(
                img_paths)  # a list of image features
            resnet_features = self.get_resnet_features(
                img_paths)  # [batch_size, 196, 2048]

            sample_list = []
            for i in range(len(detectron_features)):
                sample = Sample()
                processed_text = self.vqa_demo.text_processor({"text": qud})
                sample.text = processed_text["text"]
                sample.text_len = len(processed_text["tokens"])

                sample.image_feature_0 = detectron_features[i]
                sample.image_info_0 = Sample(
                    {"max_features": torch.tensor(100, dtype=torch.long)})
                sample.image_feature_1 = resnet_features[i]
                sample_list.append(sample)

            sample_list = SampleList(sample_list)
            sample_list = sample_list.to("cuda")

            scores = self.vqa_demo.pythia_model(sample_list)["scores"]
            scores = torch.nn.functional.softmax(scores, dim=1)
            actual, indices = scores.topk(5, dim=1)

            batch_probs = []
            batch_answers = []

            for i in range(scores.shape[0]):
                top_indices = indices[i]
                top_scores = actual[i]

                probs = []
                answers = []

                for idx, score in enumerate(top_scores):
                    probs.append(score.item())
                    answers.append(
                        self.vqa_demo.answer_processor.idx2word(
                            top_indices[idx].item()))
                batch_probs.append(probs)
                batch_answers.append(answers)

        # if GPU memory becomes an issue, uncomment to clear the cache:
        # gc.collect()
        # torch.cuda.empty_cache()

        # each returned list has batch_size entries, e.g.
        # [[ans_1, ans_2, ...], [ans_1, ans_2, ...]]
        return batch_probs, batch_answers
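
The softmax -> topk -> idx2word loop above also appears, unbatched, in several predict methods below. A hedged sketch factoring it into one helper; answer_processor.idx2word is assumed to map a vocabulary index to an answer string, as in these snippets.

import torch

def decode_topk(scores, answer_processor, k=5):
    # scores: [batch_size, num_answers] raw logits from the VQA model
    probs = torch.nn.functional.softmax(scores, dim=1)
    top_scores, top_indices = probs.topk(k, dim=1)
    batch_probs, batch_answers = [], []
    for row_scores, row_indices in zip(top_scores, top_indices):
        batch_probs.append([s.item() for s in row_scores])
        batch_answers.append(
            [answer_processor.idx2word(i.item()) for i in row_indices])
    return batch_probs, batch_answers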
Example #5
	def get_item(self, idx):

		data = self.vqamb_data[idx]

		current_sample = Sample()

		# store question and image id
		current_sample.img_id = data['id']
		current_sample.qa_id = data['qa_id']

		# process question
		question = data["question"]
		tokens = tokenize(question, remove=["?"], keep=["'s"])

		processed = self.text_processor({"tokens": tokens})
		current_sample.text = processed["text"]

		# process answers
		processed = self.answer_processor({"answers": [data['answer']]})
		current_sample.answers = processed["answers"]
		current_sample.targets = processed["answers_scores"][1:] # remove unknown index
		# Detectron features ----------------
		# TODO: read in detectron image instead if detectron is to be built
		detectron_path = self.detectron_folder + str(data['id'])
		if self.config.spatial:
			point = data['point']
			# current_sample.point = point
			detectron_path += ',' + str(point['x']) + ',' + str(point['y'])
		detectron_path += '.pt'
		
		detectron_feat = torch.load(detectron_path, map_location=torch.device('cpu'))

		# Pad features to fixed length
		if self.config.pad_detectron:
			if detectron_feat.shape[0] > 100:
				detectron_feat = detectron_feat[:100]
			elif detectron_feat.shape[0] < 100:
				pad = torch.zeros(100 - detectron_feat.shape[0], detectron_feat.shape[1])
				detectron_feat = torch.cat([detectron_feat, pad], dim=0)

		current_sample.image_feature_0 = detectron_feat
		# ---------------------------------------------

		return current_sample
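
The pad-or-truncate-to-100-rows step above recurs, nearly verbatim, in Examples #10 - #12. A standalone sketch of just that step:

import torch

def pad_or_truncate(feat, max_rows=100):
    # clip a (N, D) feature matrix to max_rows, or zero-pad it up to
    # max_rows, so every sample yields a fixed-size tensor
    if feat.shape[0] > max_rows:
        return feat[:max_rows]
    if feat.shape[0] < max_rows:
        pad = torch.zeros(max_rows - feat.shape[0], feat.shape[1])
        feat = torch.cat([feat, pad], dim=0)
    return feat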
Example #6
    def predict(self, url):
        with torch.no_grad():
            detectron_features = self.get_detectron_features(url)

            sample = Sample()
            sample.dataset_name = "coco"
            sample.dataset_type = "test"
            sample.image_feature_0 = detectron_features
            sample.answers = torch.zeros((5, 10), dtype=torch.long)

            sample_list = SampleList([sample])
            sample_list = sample_list.to("cuda")

            tokens = self.pythia_model(sample_list)["captions"]

        gc.collect()
        torch.cuda.empty_cache()

        return tokens
Example #7
    def predict(self, url, question):
        with torch.no_grad():
            detectron_features = self.get_detectron_features(url)
            resnet_features = self.get_resnet_features(url)

            sample = Sample()

            processed_text = self.text_processor({"text": question})
            sample.text = processed_text["text"]
            sample.text_len = len(processed_text["tokens"])

            sample.image_feature_0 = detectron_features
            sample.image_info_0 = Sample({
                "max_features": torch.tensor(100, dtype=torch.long)
            })

            sample.image_feature_1 = resnet_features

            sample_list = SampleList([sample])
            sample_list = sample_list.to("cuda")

            scores = self.pythia_model(sample_list)["scores"]
            scores = torch.nn.functional.softmax(scores, dim=1)
            actual, indices = scores.topk(5, dim=1)

            top_indices = indices[0]
            top_scores = actual[0]

            probs = []
            answers = []

            for idx, score in enumerate(top_scores):
                probs.append(score.item())
                answers.append(
                    self.answer_processor.idx2word(top_indices[idx].item())
                )

        gc.collect()
        torch.cuda.empty_cache()
        return probs, answers
Example #8
    def load_item(self, idx):
        sample = Sample()
        image_id = self.annotations[idx][0]
        image_folder = image_id.split('_')[0]
        caption = self.annotations[idx][1]
        tokens = tokenize(caption)
        tokens = ['<s>'] + tokens + ['</s>']
        # use text_processor to process the caption:
        # pad the sequence, convert tokens to indices, add SOS/EOS tokens
        # (text_processor already contains a pre-processor to tokenize the caption)
        caption_p = self.text_processor({'tokens': tokens})
        sample.text = caption_p['text']
        sample.caption_len = torch.tensor(len(tokens), dtype=torch.int)
        # sample.target = caption_p['text']
        sample.answers = torch.stack([caption_p['text']])
        # generate image features
        image_path = os.path.join(self.image_dir, image_folder, image_id)
        image, image_scale = self._image_transform(image_path)
        with torch.no_grad():
            image_features = self.feature_extractor([image], [image_scale])
        image_features = image_features[0]
        sample.image_feature_0 = image_features.cpu()
        return sample
Example #9
    def getAnswers(self, image, question, meta=None):

        first = time.time()
        meta = meta or str(image)
        image = Image.open(image).convert('RGB') if isinstance(image, str) else \
                image.convert('RGB')

        print(f'Tiki : Getting Answers : {meta}, {question}')

        with torch.no_grad():

            detectron_features = self.get_detectron_features(image)
            resnet152_features = self.get_resnet152_features(image)

            start = time.time()
            sample = Sample()

            processed_text = self.text_processor({'text': question})
            sample.text = processed_text['text']
            sample.text_len = len(processed_text['tokens'])

            sample.image_feature_0 = detectron_features
            sample.image_info_0 = Sample(
                {'max_features': torch.tensor(100, dtype=torch.long)})

            sample.image_feature_1 = resnet152_features

            sample_list = SampleList([sample])

            sample_list = sample_list.to(self.device.type)

            scores = self.pythiaVQA_model(sample_list)['scores']
            scores = torch.nn.functional.softmax(scores, dim=1)
            actual, indices = scores.topk(5, dim=1)

            top_indices = indices[0]
            top_scores = actual[0]

            answers = []

            for rank, score in enumerate(top_scores):
                answers.append({
                    'rank': rank,
                    'answer': self.answer_processor.idx2word(
                        top_indices[rank].item()),
                    'probability': score.item(),
                })

            answer = answers[0]['answer']

            end = time.time()

        print(
            f'Tiki : Getting Answers : PythiaVQA - Finished in {end-start:7.3f} Seconds'
        )

        processing['PythiaVQA'] = end - start

        gc.collect()

        torch.cuda.empty_cache()

        last = time.time()

        processing['InferTime'] = last - first

        return question, answer, answers
Example #10
	def get_item(self, idx):

		data = self.vqamb_data[idx]

		current_sample = Sample()

		# store question and image id
		current_sample.img_id = data['id']
		# current_sample.qa_id = data['qa_id']

		# store points
		current_sample.point = data['point'] # data['points']
		bbox = data['bbox']
		current_sample.gt_bbox = torch.Tensor([bbox['x'], bbox['y'], bbox['x'] + bbox['w'], bbox['y'] + bbox['h']])

		# process question
		question = data["pt_question"]
		tokens = tokenize(question, remove=["?"], keep=["'s"])

		processed = self.text_processor({"tokens": tokens})
		current_sample.text = processed["text"]

		# process answers
		processed = self.answer_processor({"answers": [data['ans']]})
		current_sample.answers = processed["answers"]
		current_sample.targets = processed["answers_scores"][1:] # remove unknown index

		# Detectron features ----------------
		# TODO: read in detectron image instead if detectron is to be built
		detectron_path = self.detectron_folder + str(data['id'])
		point = data['point'] # point = data['points'][0]
		if 'pt' in self.detectron_folder:
			detectron_path += ',' + str(point['x']) + ',' + str(point['y'])
		detectron_path += '.pt'
		
		detectron_feat = torch.load(detectron_path, map_location=torch.device('cpu'))

		# Pad features to fixed length
		if self.config.pad_detectron:
			if detectron_feat.shape[0] > 100:
				detectron_feat = detectron_feat[:100]
			elif detectron_feat.shape[0] < 100:
				pad = torch.zeros(100 - detectron_feat.shape[0], detectron_feat.shape[1])
				detectron_feat = torch.cat([detectron_feat, pad], dim=0)

		current_sample.image_feature_0 = detectron_feat
		# ---------------------------------------------

		# read in bounding boxes (hardcoded for now)
		
		bbox_path = ''
		bbox_path += str(data['id']) + ',' + str(point['x']) + ',' + str(point['y']) + '.pt'
		bboxes = torch.load(bbox_path, map_location=torch.device('cpu'))

		if bboxes.shape[0] > 100:
			bboxes = bboxes[:100]
		elif bboxes.shape[0] < 100:
			pad = torch.zeros(100 - bboxes.shape[0], bboxes.shape[1])
			bboxes = torch.cat([bboxes, pad], dim=0)

		current_sample.pt_bbox = bboxes

		# read in image bounding boxes
		bbox_path = ''
		bbox_path += str(data['id']) + '.pt' # + ',' + str(point['x']) + ',' + str(point['y']) + '.pt'
		bboxes = torch.load(bbox_path, map_location=torch.device('cpu'))

		if bboxes.shape[0] > 100:
			bboxes = bboxes[:100]
		elif bboxes.shape[0] < 100:
			pad = torch.zeros(100 - bboxes.shape[0], bboxes.shape[1])
			bboxes = torch.cat([bboxes, pad], dim=0)

		current_sample.img_bbox = bboxes
		
		# Context features --------------------
		if self.config.use_context:
			context_path = self.context_folder + str(data['id'])
			context_path += ',' + str(point['x']) + ',' + str(point['y'])
			context_path += '.pt'

			context_feat = torch.load(context_path, map_location=torch.device('cpu'))
			context_feat = context_feat.squeeze()
			orig_dim = context_feat.shape[0]

			if self.config.pad_context:
				if context_feat.shape[0] > 100:
					context_feat = context_feat[:100]
				elif context_feat.shape[0] < 100:
					pad = torch.zeros(100 - context_feat.shape[0], context_feat.shape[1])
					context_feat = torch.cat([context_feat, pad], dim=0)

			current_sample.context_feature_0 = context_feat
		# ---------------------------------------------

		return current_sample
Example #11
    def get_item(self, idx):

        data = self.vqamb_data[idx]

        current_sample = Sample()

        # store question and image id
        current_sample.img_id = data['id']
        current_sample.qa_id = data['qa_index']

        # store points
        current_sample.points = data['points']

        obj = data['all_objs'][0]
        xmin, ymin = obj['x'], obj['y']
        xmax, ymax = obj['x'] + obj['w'], obj['y'] + obj['h']
        current_sample.gt_bbox = torch.Tensor([xmin, ymin, xmax, ymax])

        # process question
        question = data["question"]
        tokens = tokenize(question, remove=["?"])

        processed = self.text_processor({"tokens": tokens})
        current_sample.text = processed["text"]

        # process answers
        processed = self.answer_processor({"answers": data['all_ans']})
        current_sample.answers = processed["answers"]
        current_sample.targets = processed["answers_scores"][
            1:]  # remove unknown index

        # Detectron features ----------------
        # TODO: read in detectron image instead if detectron is to be built
        detectron_path = self.detectron_folder + str(data['id'])
        bbox_path = self.bbox_folder + str(data['id'])
        if 'pt' in self.detectron_folder:
            point = data['points'][0]
            detectron_path += ',' + str(point['x']) + ',' + str(point['y'])
            bbox_path += ',' + str(point['x']) + ',' + str(point['y'])

        detectron_path += '.pt'
        bbox_path += '.pt'

        detectron_feat = torch.load(
            detectron_path, map_location=torch.device('cpu')).squeeze()
        # bbox_feat = torch.load(bbox_path, map_location=torch.device('cpu')).squeeze()
        # if detectron_feat.shape[0] == 2048:
        #     detectron_feat = detectron_feat.unsqueeze(0)
        #     bbox_feat = bbox_feat.unsqueeze(0)

        # if self.config.grid:
        #     detectron_feat = detectron_feat.view(detectron_feat.shape[0], -1).T
        # x_down = max(int(round(pt['x']/600)), 18)
        # y_down = int(round(pt['y']/800), 25)

        # preprocessing for grid features only
        # detectron_feat = detectron_feat.view(detectron_feat.shape[0], -1).T

        # Pad features to fixed length
        if self.config.grid:
            MAX_FEAT = 608
        else:
            MAX_FEAT = 100

        if self.config.pad_detectron:
            if detectron_feat.shape[0] > MAX_FEAT:
                detectron_feat = detectron_feat[:MAX_FEAT]
                # bbox_feat = bbox_feat[:MAX_FEAT]
            elif detectron_feat.shape[0] < MAX_FEAT:
                pad = torch.zeros(MAX_FEAT - detectron_feat.shape[0],
                                  detectron_feat.shape[1])
                detectron_feat = torch.cat([detectron_feat, pad], dim=0)
                # bbox_feat is commented out above, so its padding is
                # disabled here too to avoid a NameError
                # pad = torch.zeros(MAX_FEAT - bbox_feat.shape[0],
                #                   bbox_feat.shape[1])
                # bbox_feat = torch.cat([bbox_feat, pad], dim=0)
        # else:
        #     if detectron_feat.dim() > 1:
        #         detectron_feat = torch.zeros(2048)
        # current_sample.bbox = bbox_feat
        current_sample.image_feature_0 = detectron_feat
        # ---------------------------------------------

        return current_sample
Example #12
    def get_item(self, idx):

        data = self.objpart_data[idx]

        current_sample = Sample()

        # store question and image id
        current_sample.img_id = data['id']
        # current_sample.qa_id = data['qa_id']

        if data['ans'] == 'part':
            current_sample.part = 1

        else:
            current_sample.part = 0

        # store points
        current_sample.point = data['point']

        # process question
        question = data["question"]
        tokens = tokenize(question, remove=["?"])

        processed = self.text_processor({"tokens": tokens})
        current_sample.text = processed["text"]

        # process answers
        processed = self.answer_processor({"answers": [data['ans']]})
        current_sample.answers = processed["answers"]
        current_sample.targets = processed["answers_scores"][
            1:]  # remove unknown index

        # Detectron features ----------------
        # TODO: read in detectron image instead if detectron is to be built
        detectron_path = self.detectron_folder + str(data['id'])
        if 'pt' in self.detectron_folder:  # hacky way of checking for point supervision
            point = data['point']
            detectron_path += ',' + str(point['x']) + ',' + str(point['y'])

        detectron_path += '.pt'

        detectron_feat = torch.load(
            detectron_path, map_location=torch.device('cpu')).squeeze()

        # hardcode bounding box and read it

        # x_down = max(int(round(pt['x']/600)), 18)
        # y_down = int(round(pt['y']/800), 25)

        # preprocessing for grid features only
        # detectron_feat = detectron_feat.view(detectron_feat.shape[0], -1).T

        # Pad features to fixed length
        MAX_FEAT = 100

        if self.config.pad_detectron:
            if detectron_feat.shape[0] > MAX_FEAT:
                detectron_feat = detectron_feat[:MAX_FEAT]
            elif detectron_feat.shape[0] < MAX_FEAT:
                pad = torch.zeros(MAX_FEAT - detectron_feat.shape[0],
                                  detectron_feat.shape[1])
                detectron_feat = torch.cat([detectron_feat, pad], dim=0)
        # else:
        #     if detectron_feat.dim() > 1:
        #         detectron_feat = torch.zeros(2048)
        current_sample.image_feature_0 = detectron_feat
        # ---------------------------------------------

        return current_sample