def forward(self, images, image_scales, transitions=None):
    feature_list = self.encoder(images, image_scales)
    image_features = feature_list[0]
    assert len(feature_list) == 1, 'current model only supports batch size 1'

    sample = Sample()
    sample.dataset_name = "coco"
    sample.dataset_type = "test"
    sample.image_feature_0 = image_features
    # answers only acts as a placeholder here, so its size does not matter
    sample.answers = torch.zeros((1, 10), dtype=torch.long)

    sample_list = SampleList([sample])
    sample_list = sample_list.to(device)  # `device` is assumed to be defined in the enclosing scope

    if transitions is not None:
        sample_list.transitions = transitions

    output = self.decoder(sample_list)
    tokens = output["captions"]
    caption = tokens.tolist()[0]
    caption = self.decoder.caption_processor(caption)["caption"]

    return caption
def predict(self, url, feat_name, get_features=False):
    with torch.no_grad():
        detectron_features = get_detectron_features(
            [url], self.detection_model, False, feat_name, self.cuda_device
        )
        # returns a single-element list
        detectron_features = detectron_features[0]

        sample = Sample()
        sample.dataset_name = "coco"
        sample.dataset_type = "test"
        sample.image_feature_0 = detectron_features
        sample.answers = torch.zeros((5, 10), dtype=torch.long)

        sample_list = SampleList([sample])
        sample_list = sample_list.to(self.cuda_device)

        tokens = self.caption_model(sample_list)["captions"]

    gc.collect()
    torch.cuda.empty_cache()

    if not get_features:
        return tokens
    else:
        return tokens, detectron_features
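# A minimal usage sketch for predict(); `captioner`, the URL, and the "fc6"
# feature name below are illustrative assumptions, not taken from the code above.
image_url = "http://images.cocodataset.org/val2017/000000397133.jpg"
tokens = captioner.predict(image_url, "fc6")
print(tokens.tolist()[0])  # raw caption token ids; decode with a caption processor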
def test_nucleus_sampling(self):
    vocab = text_utils.VocabFromText(self.VOCAB_EXAMPLE_SENTENCES)

    model_config = self.config.model_attributes.butd
    model = TestDecoderModel(model_config, vocab)
    model.build()
    model.to("cuda")
    model.eval()

    sample = Sample()
    sample.dataset_name = "coco"
    sample.dataset_type = "test"
    sample.image_feature_0 = torch.randn(100, 2048)
    sample.answers = torch.zeros((5, 10), dtype=torch.long)
    sample_list = SampleList([sample])

    tokens = model(sample_list)["captions"]

    # these are the expected tokens for sum_threshold = 0.5
    expected_tokens = [
        1.0000e+00, 2.9140e+03, 5.9210e+03, 2.2040e+03, 5.0550e+03,
        9.2240e+03, 4.5120e+03, 1.8200e+02, 3.6490e+03, 6.4090e+03,
        2.0000e+00,
    ]

    self.assertEqual(tokens[0].tolist(), expected_tokens)
def test_caption_bleu4(self):
    path = os.path.join(
        os.path.abspath(__file__),
        "../../../pythia/common/defaults/configs/datasets/captioning/coco.yml",
    )
    with open(os.path.abspath(path)) as f:
        config = yaml.load(f, Loader=yaml.FullLoader)

    config = ConfigNode(config)
    captioning_config = config.dataset_attributes.coco
    caption_processor_config = captioning_config.processors.caption_processor

    vocab_path = os.path.join(
        os.path.abspath(__file__), "..", "..", "data", "vocab.txt"
    )
    caption_processor_config.params.vocab.vocab_file = os.path.abspath(vocab_path)
    caption_processor = CaptionProcessor(caption_processor_config.params)
    registry.register("coco_caption_processor", caption_processor)

    caption_bleu4 = metrics.CaptionBleu4Metric()
    expected = Sample()
    predicted = dict()

    # Test complete match
    expected.answers = torch.empty((5, 5, 10))
    expected.answers.fill_(4)
    predicted["scores"] = torch.zeros((5, 10, 19))
    predicted["scores"][:, :, 4] = 1.0

    self.assertEqual(caption_bleu4.calculate(expected, predicted).item(), 1.0)

    # Test partial match
    expected.answers = torch.empty((5, 5, 10))
    expected.answers.fill_(4)
    predicted["scores"] = torch.zeros((5, 10, 19))
    predicted["scores"][:, 0:5, 4] = 1.0

    self.assertAlmostEqual(
        caption_bleu4.calculate(expected, predicted).item(), 0.3928, 4
    )
def get_item(self, idx):
    data = self.vqamb_data[idx]

    current_sample = Sample()

    # store question and image id
    current_sample.img_id = data['id']
    current_sample.qa_id = data['qa_id']

    # process question
    question = data["question"]
    tokens = tokenize(question, remove=["?"], keep=["'s"])
    processed = self.text_processor({"tokens": tokens})
    current_sample.text = processed["text"]

    # process answers
    processed = self.answer_processor({"answers": [data['answer']]})
    current_sample.answers = processed["answers"]
    current_sample.targets = processed["answers_scores"][1:]  # remove unknown index

    # Detectron features ----------------
    # TODO: read in detectron image instead if detectron is to be built
    detectron_path = self.detectron_folder + str(data['id'])
    if self.config.spatial:
        point = data['point']
        detectron_path += ',' + str(point['x']) + ',' + str(point['y'])
    detectron_path += '.pt'

    detectron_feat = torch.load(detectron_path, map_location=torch.device('cpu'))

    # Pad or truncate features to a fixed length of 100
    if self.config.pad_detectron:
        if detectron_feat.shape[0] > 100:
            detectron_feat = detectron_feat[:100]
        elif detectron_feat.shape[0] < 100:
            pad = torch.zeros(100 - detectron_feat.shape[0], detectron_feat.shape[1])
            detectron_feat = torch.cat([detectron_feat, pad], dim=0)

    current_sample.image_feature_0 = detectron_feat
    # ---------------------------------------------

    return current_sample
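# The pad-or-truncate block above recurs in several get_item variants below.
# A small helper along these lines would remove the duplication; the name
# pad_or_truncate is hypothetical, not from the original code.
def pad_or_truncate(feat, max_len=100):
    """Zero-pad or truncate an (N, D) feature tensor to exactly max_len rows."""
    if feat.shape[0] > max_len:
        return feat[:max_len]
    if feat.shape[0] < max_len:
        pad = torch.zeros(max_len - feat.shape[0], feat.shape[1])
        return torch.cat([feat, pad], dim=0)
    return feat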
def predict(self, url):
    with torch.no_grad():
        detectron_features = self.get_detectron_features(url)

        sample = Sample()
        sample.dataset_name = "coco"
        sample.dataset_type = "test"
        sample.image_feature_0 = detectron_features
        sample.answers = torch.zeros((5, 10), dtype=torch.long)

        sample_list = SampleList([sample])
        sample_list = sample_list.to("cuda")

        tokens = self.pythia_model(sample_list)["captions"]

    gc.collect()
    torch.cuda.empty_cache()

    return tokens
def get_item(self, idx):
    data = self.questions[idx]

    # Each call to get_item from the dataloader returns a Sample object, which is
    # collated by our special batch collator into a SampleList -- essentially an
    # attribute-based batch.
    current_sample = Sample()

    question = data["question"]
    tokens = tokenize(question, keep=[";", ","], remove=["?", "."])
    processed = self.text_processor({"tokens": tokens})
    current_sample.text = processed["text"]

    processed = self.answer_processor({"answers": [data["answer"]]})
    current_sample.answers = processed["answers"]
    current_sample.targets = processed["answers_scores"]

    image_path = os.path.join(self.image_path, data["image_filename"])
    image = np.true_divide(Image.open(image_path).convert("RGB"), 255)
    image = image.astype(np.float32)
    current_sample.image = torch.from_numpy(image.transpose(2, 0, 1))

    return current_sample
def load_item(self, idx):
    sample = Sample()
    image_id = self.annotations[idx][0]
    image_folder = image_id.split('_')[0]

    caption = self.annotations[idx][1]
    tokens = tokenize(caption)
    tokens = ['<s>'] + tokens + ['</s>']

    # use text_processor to process the caption: it pads the sequence and
    # converts tokens to indices (the SOS/EOS tokens were added above);
    # text_processor already contains a pre-processor to tokenize the caption
    caption_p = self.text_processor({'tokens': tokens})
    sample.text = caption_p['text']
    sample.caption_len = torch.tensor(len(tokens), dtype=torch.int)

    sample.answers = torch.stack([caption_p['text']])

    # generate image features
    image_path = os.path.join(self.image_dir, image_folder, image_id)
    image, image_scale = self._image_transform(image_path)

    with torch.no_grad():
        image_features = self.feature_extractor([image], [image_scale])
    image_features = image_features[0]

    sample.image_feature_0 = image_features.cpu()

    return sample
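# For reference, Sample objects from load_item are typically batched by
# collating them into a SampleList. A minimal sketch, assuming `dataset`
# exposes load_item via __getitem__; this wiring is an assumption, not
# part of the original code.
from torch.utils.data import DataLoader

loader = DataLoader(dataset, batch_size=8, collate_fn=lambda batch: SampleList(batch))
for sample_list in loader:
    print(sample_list.image_feature_0.shape)  # fields are stacked per attribute
    break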
def get_item(self, idx):
    data = self.vqamb_data[idx]

    current_sample = Sample()

    # store question and image id
    current_sample.img_id = data['id']
    # current_sample.qa_id = data['qa_id']

    # store points
    current_sample.point = data['point']  # data['points']
    bbox = data['bbox']
    current_sample.gt_bbox = torch.Tensor([
        bbox['x'],
        bbox['y'],
        bbox['x'] + bbox['w'],
        bbox['y'] + bbox['h'],
    ])

    # process question
    question = data["pt_question"]
    tokens = tokenize(question, remove=["?"], keep=["'s"])
    processed = self.text_processor({"tokens": tokens})
    current_sample.text = processed["text"]

    # process answers
    processed = self.answer_processor({"answers": [data['ans']]})
    current_sample.answers = processed["answers"]
    current_sample.targets = processed["answers_scores"][1:]  # remove unknown index

    # Detectron features ----------------
    # TODO: read in detectron image instead if detectron is to be built
    detectron_path = self.detectron_folder + str(data['id'])
    point = data['point']  # point = data['points'][0]
    if 'pt' in self.detectron_folder:
        detectron_path += ',' + str(point['x']) + ',' + str(point['y'])
    detectron_path += '.pt'

    detectron_feat = torch.load(detectron_path, map_location=torch.device('cpu'))

    # Pad or truncate features to a fixed length of 100
    if self.config.pad_detectron:
        if detectron_feat.shape[0] > 100:
            detectron_feat = detectron_feat[:100]
        elif detectron_feat.shape[0] < 100:
            pad = torch.zeros(100 - detectron_feat.shape[0], detectron_feat.shape[1])
            detectron_feat = torch.cat([detectron_feat, pad], dim=0)

    current_sample.image_feature_0 = detectron_feat
    # ---------------------------------------------

    # read in point bounding boxes (path prefix hardcoded for now)
    bbox_path = ''
    bbox_path += str(data['id']) + ',' + str(point['x']) + ',' + str(point['y']) + '.pt'
    bboxes = torch.load(bbox_path, map_location=torch.device('cpu'))

    if bboxes.shape[0] > 100:
        bboxes = bboxes[:100]
    elif bboxes.shape[0] < 100:
        pad = torch.zeros(100 - bboxes.shape[0], bboxes.shape[1])
        bboxes = torch.cat([bboxes, pad], dim=0)

    current_sample.pt_bbox = bboxes

    # read in image bounding boxes (path prefix hardcoded for now)
    bbox_path = ''
    bbox_path += str(data['id']) + '.pt'
    bboxes = torch.load(bbox_path, map_location=torch.device('cpu'))

    if bboxes.shape[0] > 100:
        bboxes = bboxes[:100]
    elif bboxes.shape[0] < 100:
        pad = torch.zeros(100 - bboxes.shape[0], bboxes.shape[1])
        bboxes = torch.cat([bboxes, pad], dim=0)

    current_sample.img_bbox = bboxes

    # Context features --------------------
    if self.config.use_context:
        context_path = self.context_folder + str(data['id'])
        context_path += ',' + str(point['x']) + ',' + str(point['y'])
        context_path += '.pt'

        context_feat = torch.load(context_path, map_location=torch.device('cpu'))
        context_feat = context_feat.squeeze()

        if self.config.pad_context:
            if context_feat.shape[0] > 100:
                context_feat = context_feat[:100]
            elif context_feat.shape[0] < 100:
                pad = torch.zeros(100 - context_feat.shape[0], context_feat.shape[1])
                context_feat = torch.cat([context_feat, pad], dim=0)

        current_sample.context_feature_0 = context_feat
    # ---------------------------------------------

    return current_sample
def get_item(self, idx):
    data = self.vqamb_data[idx]

    current_sample = Sample()

    # store question and image id
    current_sample.img_id = data['id']
    current_sample.qa_id = data['qa_index']

    # store points
    current_sample.points = data['points']

    obj = data['all_objs'][0]
    xmin, ymin = obj['x'], obj['y']
    xmax, ymax = obj['x'] + obj['w'], obj['y'] + obj['h']
    current_sample.gt_bbox = torch.Tensor([xmin, ymin, xmax, ymax])

    # process question
    question = data["question"]
    tokens = tokenize(question, remove=["?"])
    processed = self.text_processor({"tokens": tokens})
    current_sample.text = processed["text"]

    # process answers
    processed = self.answer_processor({"answers": data['all_ans']})
    current_sample.answers = processed["answers"]
    current_sample.targets = processed["answers_scores"][1:]  # remove unknown index

    # Detectron features ----------------
    # TODO: read in detectron image instead if detectron is to be built
    detectron_path = self.detectron_folder + str(data['id'])
    bbox_path = self.bbox_folder + str(data['id'])
    if 'pt' in self.detectron_folder:
        point = data['points'][0]
        detectron_path += ',' + str(point['x']) + ',' + str(point['y'])
        bbox_path += ',' + str(point['x']) + ',' + str(point['y'])
    detectron_path += '.pt'
    bbox_path += '.pt'

    detectron_feat = torch.load(detectron_path, map_location=torch.device('cpu')).squeeze()
    # bbox_feat = torch.load(bbox_path, map_location=torch.device('cpu')).squeeze()

    # pre-processing for grid features only:
    # detectron_feat = detectron_feat.view(detectron_feat.shape[0], -1).T

    # Pad or truncate features to a fixed length
    # (grid features use 608 slots, region features 100)
    MAX_FEAT = 608 if self.config.grid else 100

    if self.config.pad_detectron:
        if detectron_feat.shape[0] > MAX_FEAT:
            detectron_feat = detectron_feat[:MAX_FEAT]
            # bbox_feat = bbox_feat[:MAX_FEAT]
        elif detectron_feat.shape[0] < MAX_FEAT:
            pad = torch.zeros(MAX_FEAT - detectron_feat.shape[0], detectron_feat.shape[1])
            detectron_feat = torch.cat([detectron_feat, pad], dim=0)
            # bbox_feat is only defined by the commented-out torch.load above;
            # if that line is re-enabled, pad it here as well:
            # pad = torch.zeros(MAX_FEAT - bbox_feat.shape[0], bbox_feat.shape[1])
            # bbox_feat = torch.cat([bbox_feat, pad], dim=0)

    # current_sample.bbox = bbox_feat
    current_sample.image_feature_0 = detectron_feat
    # ---------------------------------------------

    return current_sample
def get_item(self, idx):
    data = self.objpart_data[idx]

    current_sample = Sample()

    # store question and image id
    current_sample.img_id = data['id']
    # current_sample.qa_id = data['qa_id']
    current_sample.part = 1 if data['ans'] == 'part' else 0

    # store points
    current_sample.point = data['point']

    # process question
    question = data["question"]
    tokens = tokenize(question, remove=["?"])
    processed = self.text_processor({"tokens": tokens})
    current_sample.text = processed["text"]

    # process answers
    processed = self.answer_processor({"answers": [data['ans']]})
    current_sample.answers = processed["answers"]
    current_sample.targets = processed["answers_scores"][1:]  # remove unknown index

    # Detectron features ----------------
    # TODO: read in detectron image instead if detectron is to be built
    detectron_path = self.detectron_folder + str(data['id'])
    if 'pt' in self.detectron_folder:  # hacky way of detecting point supervision
        point = data['point']
        detectron_path += ',' + str(point['x']) + ',' + str(point['y'])
    detectron_path += '.pt'

    detectron_feat = torch.load(detectron_path, map_location=torch.device('cpu')).squeeze()

    # Pad or truncate features to a fixed length of MAX_FEAT
    MAX_FEAT = 100

    if self.config.pad_detectron:
        if detectron_feat.shape[0] > MAX_FEAT:
            detectron_feat = detectron_feat[:MAX_FEAT]
        elif detectron_feat.shape[0] < MAX_FEAT:
            pad = torch.zeros(MAX_FEAT - detectron_feat.shape[0], detectron_feat.shape[1])
            detectron_feat = torch.cat([detectron_feat, pad], dim=0)

    current_sample.image_feature_0 = detectron_feat
    # ---------------------------------------------

    return current_sample