Ejemplo n.º 1
0
 def _add_proposals_from_file(self, roidb, proposal_file, min_proposal_size,
                              top_k, crowd_thresh):
     """Add proposals from a proposals file to an roidb.

     Args:
         roidb: list of roidb entry dicts; each entry must provide 'id',
             'height' and 'width' keys.
         proposal_file: path to a pickled dict with a 'boxes' list and an
             image-id field ('indexes' or 'ids'), aligned with roidb order
             after sorting.
         min_proposal_size: minimum box side length for a proposal to be
             kept.
         top_k: keep at most this many proposals per image (<= 0 keeps all).
         crowd_thresh: overlap threshold for filtering proposals that fall
             inside crowd regions (<= 0 disables the filter).
     """
     logger.info('Loading proposals from: {}'.format(proposal_file))
     # Pickle streams must be read in binary mode on Python 3; text mode
     # ('r') raises UnicodeDecodeError on arbitrary pickle bytes.
     # NOTE(review): pickle.load can execute arbitrary code -- only load
     # proposal files from trusted sources.
     with open(proposal_file, 'rb') as f:
         proposals = pickle.load(f)
     id_field = 'indexes' if 'indexes' in proposals else 'ids'  # compat fix
     _sort_proposals(proposals, id_field)
     box_list = []
     for i, entry in enumerate(roidb):
         if i % 2500 == 0:
             logger.info(' {:d}/{:d}'.format(i + 1, len(roidb)))
         boxes = proposals['boxes'][i]
         # Sanity check that these boxes are for the correct image id
         assert entry['id'] == proposals[id_field][i]
         # Remove duplicate boxes and very small boxes and then take top k
         boxes = box_utils.clip_boxes_to_image(boxes, entry['height'],
                                               entry['width'])
         keep = box_utils.unique_boxes(boxes)
         boxes = boxes[keep, :]
         keep = box_utils.filter_small_boxes(boxes, min_proposal_size)
         boxes = boxes[keep, :]
         if top_k > 0:
             boxes = boxes[:top_k, :]
         box_list.append(boxes)
     _merge_proposal_boxes_into_roidb(roidb, box_list)
     if crowd_thresh > 0:
         _filter_crowd_proposals(roidb, crowd_thresh)
Ejemplo n.º 2
0
 def _add_proposals_from_file(self, roidb, proposal_file, min_proposal_size,
                              top_k):
     """Add proposals from a proposals file to an roidb.

     Args:
         roidb: list of roidb entry dicts; each entry must provide
             'seq_idx', 'idx', 'height' and 'width' keys.
         proposal_file: path to a pickled dict whose 'boxes' entry is
             addressed as boxes[seq_idx][idx].
         min_proposal_size: minimum box side length for a proposal to be
             kept.
         top_k: keep at most this many proposals per entry (<= 0 keeps
             all).
     """
     logger.info('Loading proposals from: {}'.format(proposal_file))
     # Pickle streams must be read in binary mode on Python 3; text mode
     # ('r') raises UnicodeDecodeError on arbitrary pickle bytes.
     # NOTE(review): pickle.load can execute arbitrary code -- only load
     # proposal files from trusted sources.
     with open(proposal_file, 'rb') as f:
         proposals = pickle.load(f)
     # Proposal boxes are indexed per sequence: proposals['boxes'][seq_idx][idx]
     box_list = []
     for i, entry in enumerate(roidb):
         if i % 500 == 0:
             logger.info(' {:d}/{:d}'.format(i + 1, len(roidb)))
         seq_idx = entry['seq_idx']
         idx = entry['idx']
         boxes = proposals['boxes'][seq_idx][idx]
         # Remove duplicate boxes and very small boxes and then take top k
         boxes = box_utils.clip_boxes_to_image(boxes, entry['height'],
                                               entry['width'])
         keep = box_utils.unique_boxes(boxes)
         boxes = boxes[keep, :]
         keep = box_utils.filter_small_boxes(boxes, min_proposal_size)
         boxes = boxes[keep, :]
         if top_k > 0:
             boxes = boxes[:top_k, :]
         box_list.append(boxes)
     _merge_proposal_boxes_into_roidb(roidb, box_list)
Ejemplo n.º 3
0
 def _add_proposals_from_file(
     self, roidb, proposal_file, min_proposal_size, top_k, crowd_thresh
 ):
     """Add proposals from a proposals file to an roidb.

     Args:
         roidb: list of roidb entry dicts; each entry must provide 'id',
             'height' and 'width' keys.
         proposal_file: path to a pickled dict with a 'boxes' list and an
             image-id field ('indexes' or 'ids'), aligned with roidb order
             after sorting.
         min_proposal_size: minimum box side length for a proposal to be
             kept.
         top_k: keep at most this many proposals per image (<= 0 keeps all).
         crowd_thresh: overlap threshold for filtering proposals that fall
             inside crowd regions (<= 0 disables the filter).
     """
     logger.info('Loading proposals from: {}'.format(proposal_file))
     # Pickle streams must be read in binary mode on Python 3; text mode
     # ('r') raises UnicodeDecodeError on arbitrary pickle bytes.
     # NOTE(review): pickle.load can execute arbitrary code -- only load
     # proposal files from trusted sources.
     with open(proposal_file, 'rb') as f:
         proposals = pickle.load(f)
     id_field = 'indexes' if 'indexes' in proposals else 'ids'  # compat fix
     _sort_proposals(proposals, id_field)
     box_list = []
     for i, entry in enumerate(roidb):
         if i % 2500 == 0:
             logger.info(' {:d}/{:d}'.format(i + 1, len(roidb)))
         boxes = proposals['boxes'][i]
         # Sanity check that these boxes are for the correct image id
         assert entry['id'] == proposals[id_field][i]
         # Remove duplicate boxes and very small boxes and then take top k
         boxes = box_utils.clip_boxes_to_image(
             boxes, entry['height'], entry['width']
         )
         keep = box_utils.unique_boxes(boxes)
         boxes = boxes[keep, :]
         keep = box_utils.filter_small_boxes(boxes, min_proposal_size)
         boxes = boxes[keep, :]
         if top_k > 0:
             boxes = boxes[:top_k, :]
         box_list.append(boxes)
     _merge_proposal_boxes_into_roidb(roidb, box_list)
     if crowd_thresh > 0:
         _filter_crowd_proposals(roidb, crowd_thresh)
Ejemplo n.º 4
0
	def __init__(self,
				 root,
				 year='2007',
				 image_set='trainval',
				 download=False,
				 transforms=None):
		"""Pascal VOC detection dataset with precomputed selective-search proposals.

		Args:
			root: dataset root directory; must contain the extracted VOC
				folder and a 'selective_search_data' directory holding the
				proposal pickle files.
			year: VOC release year as a string, e.g. '2007'.
			image_set: split name -- 'train', 'trainval' or 'val' ('test' is
				additionally valid for the 2007 release).
			download: if True, download and extract the dataset archive.
			transforms: optional callable stored for later use.

		Raises:
			RuntimeError: if the dataset directory is missing or corrupted.
		"""
		self.root = root
		self.transforms = transforms
		self.year = year
		# The 2007 test split is distributed as a separate archive, so it
		# has its own entry in DATASET_YEAR_DICT.
		if year == "2007" and image_set == "test":
			year = "2007-test"
		self.url = DATASET_YEAR_DICT[year]['url']
		self.filename = DATASET_YEAR_DICT[year]['filename']
		self.md5 = DATASET_YEAR_DICT[year]['md5']
		valid_sets = ["train", "trainval", "val"]
		if year == "2007-test":
			valid_sets.append("test")
		self.image_set = verify_str_arg(image_set, "image_set", valid_sets)

		base_dir = DATASET_YEAR_DICT[year]['base_dir']
		voc_root = os.path.join(self.root, base_dir)
		image_dir = os.path.join(voc_root, 'JPEGImages')
		annotation_dir = os.path.join(voc_root, 'Annotations')

		if download:
			download_extract(self.url, self.root, self.filename, self.md5)

		if not os.path.isdir(voc_root):
			raise RuntimeError('Dataset not found or corrupted.' +
							   ' You can use download=True to download it')

		splits_dir = os.path.join(voc_root, 'ImageSets/Main')
		split_f = os.path.join(splits_dir, image_set.rstrip('\n') + '.txt')

		with open(split_f, "r") as f:
			file_names = [x.strip() for x in f.readlines()]

		self.class_labels = ['aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor']
		self.num_classes = len(self.class_labels)

		self.images = [os.path.join(image_dir, x + ".jpg") for x in file_names]
		annotations = [os.path.join(annotation_dir, x + ".xml") for x in file_names]
		assert (len(self.images) == len(annotations))

		# Parse every annotation XML up front so samples can later be served
		# without re-reading annotation files.
		self.annotations = []
		for ann_path in annotations:
			raw_annot = self.parse_voc_xml(ET.parse(ann_path).getroot())['annotation']
			self.annotations.append({
				# NOTE(review): assumes VOC-style numeric file names such as
				# '000005.jpg' -- confirm for non-2007 releases.
				'img_key': int(raw_annot['filename'].replace('.jpg', '')),
				'width': int(raw_annot['size']['width']),
				'height': int(raw_annot['size']['height']),
				'object': raw_annot['object'],
				})

		# Load precomputed selective-search proposals. os.path.join is used
		# so the path is correct whether or not `root` ends with a path
		# separator (plain string concatenation built a wrong path when it
		# did not).
		ss_data = os.path.join(
			self.root, 'selective_search_data',
			'voc_' + self.year + '_' + self.image_set + '.pkl')
		# NOTE(review): pickle.load can execute arbitrary code -- only load
		# proposal files from trusted sources.
		with open(ss_data, 'rb') as f:
			proposals = pickle.load(f)

		# Align proposal order with the annotation order.
		sort_proposals(proposals, 'indexes')

		self.proposals = []
		for i, boxes in enumerate(proposals['boxes']):
			if i % 2500 == 0:
				logger.info(' {:d}/{:d}'.format(i + 1, len(proposals['boxes'])))

			annotation = self.annotations[i]
			# Sanity check that these boxes belong to the matching image.
			assert annotation['img_key'] == proposals['indexes'][i]
			# Remove duplicate boxes and very small boxes and then take top k
			boxes = box_utils.clip_boxes_to_image(boxes, annotation['height'], annotation['width'])
			keep = box_utils.unique_boxes(boxes)
			boxes = boxes[keep, :]
			keep = box_utils.filter_small_boxes(boxes, cfg.FAST_RCNN.MIN_PROPOSAL_SIZE)
			boxes = boxes[keep, :]
			if cfg.FAST_RCNN.TOP_K > 0:
				boxes = boxes[:cfg.FAST_RCNN.TOP_K, :]
			# np.float was removed in NumPy 1.24; np.float64 is the exact
			# dtype it aliased.
			self.proposals.append(boxes.astype(np.float64))