Example #1
import cv2
import torch
from collections import deque

from fastreid.engine import DefaultPredictor  # assumed import path (fast-reid)
# AsyncPredictor is assumed to be defined alongside this class in the demo code.


class FeatureExtractionDemo(object):
    def __init__(self, cfg, parallel=False):
        """
        Args:
            cfg (CfgNode): the config used to build the predictor.
            parallel (bool): whether to run the model in a separate process from
                visualization. Useful since the visualization logic can be slow.
        """
        self.cfg = cfg
        self.parallel = parallel

        if parallel:
            self.num_gpus = torch.cuda.device_count()
            self.predictor = AsyncPredictor(cfg, self.num_gpus)
        else:
            self.predictor = DefaultPredictor(cfg)

    def run_on_image(self, original_image):
        """

        Args:
            original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
                This is the format used by OpenCV.

        Returns:
            predictions (np.ndarray): normalized feature of the model.
        """
        # The model expects RGB inputs, so convert from OpenCV's BGR order.
        original_image = original_image[:, :, ::-1]
        # Resize to the test size; cv2.resize expects (width, height).
        image = cv2.resize(original_image, tuple(self.cfg.INPUT.SIZE_TEST[::-1]), interpolation=cv2.INTER_CUBIC)
        # Convert the HWC uint8 image to a float32 CHW tensor and add a batch dimension.
        image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))[None]
        predictions = self.predictor(image)
        return predictions

    def run_on_loader(self, data_loader):
        if self.parallel:
            buffer_size = self.predictor.default_buffer_size

            batch_data = deque()

            # Keep feeding batches to the async predictor; once the buffer is
            # primed, start pulling completed results so put/get stay overlapped.
            for cnt, batch in enumerate(data_loader):
                batch_data.append(batch)
                self.predictor.put(batch["images"])

                if cnt >= buffer_size:
                    batch = batch_data.popleft()
                    predictions = self.predictor.get()
                    yield predictions, batch["targets"].cpu().numpy(), batch["camids"].cpu().numpy()

            # Drain the batches that are still waiting for results.
            while len(batch_data):
                batch = batch_data.popleft()
                predictions = self.predictor.get()
                yield predictions, batch["targets"].cpu().numpy(), batch["camids"].cpu().numpy()
        else:
            for batch in data_loader:
                predictions = self.predictor(batch["images"])
                yield predictions, batch["targets"].cpu().numpy(), batch["camids"].cpu().numpy()
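
A minimal usage sketch for Example #1; the config helper, config path, and checkpoint
name below are assumptions in fast-reid's style, not part of the original example:

from fastreid.config import get_cfg  # assumed config helper

cfg = get_cfg()
cfg.merge_from_file("configs/Market1501/bagtricks_R50.yml")  # hypothetical config path
cfg.MODEL.WEIGHTS = "model_final.pth"                        # hypothetical checkpoint
cfg.freeze()

demo = FeatureExtractionDemo(cfg, parallel=False)
img = cv2.imread("query.jpg")    # BGR image of shape (H, W, C), as OpenCV loads it
feat = demo.run_on_image(img)    # normalized feature for this single image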
Example #2
import cv2
import torch
from collections import deque

# DefaultPredictor and AsyncPredictor are assumed to be provided by the
# surrounding project; this variant passes an explicit device to DefaultPredictor.


class FeatureExtractionDemo(object):
    def __init__(self, cfg, device='cuda:0', parallel=False):
        """
        Args:
            cfg (CfgNode): the config used to build the predictor.
            device (str): the device the predictor runs on (ignored when parallel=True).
            parallel (bool): whether to run the model in a separate process from
                visualization. Useful since the visualization logic can be slow.
        """
        self.cfg = cfg
        self.parallel = parallel

        if parallel:
            self.num_gpus = torch.cuda.device_count()
            self.predictor = AsyncPredictor(cfg, self.num_gpus)
        else:
            self.predictor = DefaultPredictor(cfg, device)

        # Per-channel normalization statistics, shaped (1, C, 1, 1) for broadcasting.
        num_channels = len(cfg.MODEL.PIXEL_MEAN)
        self.mean = torch.tensor(cfg.MODEL.PIXEL_MEAN).view(1, num_channels, 1, 1)
        self.std = torch.tensor(cfg.MODEL.PIXEL_STD).view(1, num_channels, 1, 1)

    def run_on_image(self, original_image):
        """

        Args:
            original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
                This is the format used by OpenCV.

        Returns:
            predictions (np.ndarray): normalized feature of the model.
        """
        # The model expects RGB inputs, so convert from OpenCV's BGR order.
        original_image = original_image[:, :, ::-1]
        # Resize to the test size; cv2.resize expects (width, height).
        image = cv2.resize(original_image,
                           tuple(self.cfg.INPUT.SIZE_TEST[::-1]),
                           interpolation=cv2.INTER_CUBIC)
        # Convert the HWC uint8 image to a float32 CHW tensor and add a batch dimension.
        image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))[None]
        # Normalize in place with the per-channel mean and std.
        image.sub_(self.mean).div_(self.std)
        predictions = self.predictor(image)
        return predictions

    def run_on_loader(self, data_loader):

        image_gen = self._image_from_loader(data_loader)
        if self.parallel:
            buffer_size = self.predictor.default_buffer_size

            batch_data = deque()

            # Keep feeding batches to the async predictor; once the buffer is
            # primed, start pulling completed results so put/get stay overlapped.
            for cnt, batch in enumerate(image_gen):
                batch_data.append(batch)
                self.predictor.put(batch['images'])

                if cnt >= buffer_size:
                    batch = batch_data.popleft()
                    predictions = self.predictor.get()
                    yield predictions, batch['targets'].numpy(), batch['camid'].numpy()

            # Drain the batches that are still waiting for results.
            while len(batch_data):
                batch = batch_data.popleft()
                predictions = self.predictor.get()
                yield predictions, batch['targets'].numpy(), batch['camid'].numpy()
        else:
            for batch in image_gen:
                predictions = self.predictor(batch['images'])
                yield predictions, batch['targets'].numpy(), batch['camid'].numpy()

    def _image_from_loader(self, data_loader):
        # The loader is assumed to expose reset()/next() rather than the standard
        # Python iterator protocol; yield batches until it is exhausted.
        data_loader.reset()
        data = data_loader.next()
        while data is not None:
            yield data
            data = data_loader.next()
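
A sketch of how the loader path of Example #2 might be consumed; demo and data_loader
are placeholder names, and the loader is assumed to yield dict batches with 'images',
'targets' and 'camid' keys, as the code above expects:

import numpy as np

feats, pids, camids = [], [], []
for feat, pid, camid in demo.run_on_loader(data_loader):
    feats.append(feat)      # batch features from the predictor
    pids.append(pid)        # identity labels as numpy arrays
    camids.append(camid)    # camera ids as numpy arrays

pids = np.concatenate(pids)
camids = np.concatenate(camids)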