Code example #1
    def extract(self, im: torch.Tensor):
        im = im / 255
        im -= self.mean
        im /= self.std

        if self.use_gpu:
            im = im.cuda()

        with torch.no_grad():
            output_features = self.net.extract_features(
                im, self.feature_layers)

        # Store the raw resnet features which are input to iounet
        self.iounet_backbone_features = TensorList([
            output_features[layer].clone()
            for layer in self.iounet_feature_layers
        ])

        # Store the processed features from iounet, just before pooling
        with torch.no_grad():
            self.iounet_features = TensorList(
                self.iou_predictor.get_iou_feat(self.iounet_backbone_features))

        return TensorList(
            [output_features[layer] for layer in self.output_layers])
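
For reference, a minimal, self-contained sketch of the normalization and no-grad extraction pattern used above; the mean/std values and the single Conv2d standing in for net.extract_features are assumptions for illustration only:

import torch

# Assumed values: ImageNet-style channel statistics and a dummy image in the [0, 255] range.
mean = torch.tensor([0.485, 0.456, 0.406]).view(1, 3, 1, 1)
std = torch.tensor([0.229, 0.224, 0.225]).view(1, 3, 1, 1)
im = torch.randint(0, 256, (1, 3, 224, 224)).float()

im = im / 255   # scale to [0, 1]
im -= mean      # subtract per-channel mean
im /= std       # divide by per-channel std

with torch.no_grad():   # inference only, no autograd bookkeeping
    features = torch.nn.Conv2d(3, 8, kernel_size=3, stride=2)(im)  # stand-in for net.extract_features
print(features.shape)   # torch.Size([1, 8, 111, 111])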
Code example #2
 def __init__(self, training_samples: TensorList, y: TensorList,
              filter_reg: torch.Tensor, sample_weights: TensorList,
              response_activation):
     self.training_samples = training_samples.variable()
     self.y = y.variable()
     self.filter_reg = filter_reg
     self.sample_weights = sample_weights
     self.response_activation = response_activation
Code example #3
 def size(self, im_sz):
     if self.output_size is None:
         # // replaced by / for pytorch 0.3.1 compatibility
         return TensorList([im_sz / s for s in self.stride()])
     if isinstance(im_sz, torch.Tensor):
         return TensorList([
             im_sz // s if sz is None else torch.Tensor([sz[0], sz[1]])
             for sz, s in zip(self.output_size, self.stride())
         ])
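
A quick worked check of the first branch, with an input size and feature strides assumed for illustration:

import torch

im_sz = torch.Tensor([288., 288.])    # assumed input image size
strides = [8, 16]                     # assumed feature strides
print([im_sz // s for s in strides])  # [tensor([36., 36.]), tensor([18., 18.])]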
Code example #4
 def get_attribute(self, name: str, ignore_missing: bool = False):
     if ignore_missing:
         return TensorList([
             getattr(f, name) for f in self.features
             if self._return_feature(f) and hasattr(f, name)
         ])
     else:
         return TensorList([
             getattr(f, name, None) for f in self.features
             if self._return_feature(f)
         ])
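
The two branches differ only in how a feature lacking the requested attribute is treated: with ignore_missing=True the feature is skipped, otherwise a None placeholder keeps the result aligned with self.features. A minimal sketch of that distinction, using a hypothetical Feature stub:

class Feature:
    def __init__(self, **kw):
        for k, v in kw.items():
            setattr(self, k, v)

features = [Feature(fparams=1), Feature()]   # the second feature has no 'fparams'

skipped = [getattr(f, 'fparams') for f in features if hasattr(f, 'fparams')]  # ignore_missing=True
padded = [getattr(f, 'fparams', None) for f in features]                      # ignore_missing=False
print(skipped, padded)                                                         # [1] [1, None]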
Code example #5
    def extract_transformed(self, im, pos, scale, image_sz, transforms, flag):
        """Extract features from a set of transformed image samples.
        args:
            im: Image.
            pos: Center position for extraction.
            scale: Image scale to extract features from.
            image_sz: Size to resize the image samples to before extraction.
            transforms: A set of image transforms to apply.
            flag: Modality flag forwarded to each feature extractor.
        """

        # Get image patch
        im_patch = sample_patch(im, pos, scale * image_sz, image_sz).data

        # Apply each transform and stack the results along the batch dimension
        im_patches = torch.cat([T(im_patch).data for T in transforms])

        # Compute features
        feature_map = TensorList(
            [f.get_feature(im_patches, flag) for f in self.features]).unroll()

        return feature_map
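
The core pattern above is one patch in, one batch of augmented patches out: every transform is applied to the same patch and the results are concatenated along the batch dimension. A minimal sketch with assumed dummy transforms:

import torch

im_patch = torch.rand(1, 3, 288, 288)   # a single sampled patch (assumed size)
transforms = [lambda x: x,              # identity
              lambda x: x.flip(-1),     # horizontal flip
              lambda x: x.flip(-2)]     # vertical flip

im_patches = torch.cat([T(im_patch) for T in transforms])
print(im_patches.shape)                 # torch.Size([3, 3, 288, 288])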
Code example #6
    def __init__(self, *args, **kwargs):
        if len(args) > 0:
            raise ValueError("positional arguments are not supported; use keyword arguments")

        for name, val in kwargs.items():
            if isinstance(val, list):
                setattr(self, name, TensorList(val))
            else:
                setattr(self, name, val)
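
This constructor turns keyword arguments into attributes, wrapping plain lists so that element-wise operations work later. A minimal sketch of the same pattern, with a hypothetical Params class and a bare stub standing in for TensorList:

class TensorList(list):
    """Minimal stub; the real class adds element-wise tensor operations."""

class Params:
    def __init__(self, *args, **kwargs):
        if len(args) > 0:
            raise ValueError("positional arguments are not supported")
        for name, val in kwargs.items():
            setattr(self, name, TensorList(val) if isinstance(val, list) else val)

p = Params(learning_rate=0.01, feature_weights=[1.0, 0.5])
print(p.learning_rate, type(p.feature_weights).__name__)   # 0.01 TensorList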
Code example #7
 def get_fparams(self, name: str = None):
     if name is None:
         return [
             f.fparams for f in self.features if self._return_feature(f)
         ]
     return TensorList([
         getattr(f.fparams, name) for f in self.features
         if self._return_feature(f)
     ]).unroll()
Code example #8
    def extract(self, im: torch.Tensor):
        im = im / 255
        im -= self.mean
        im /= self.std

        if self.use_gpu:
            im = im.cuda()

        with torch.no_grad():
            return TensorList(self.net(im).values())
Code example #9
    def extract(self, im, pos, scales, image_sz):
        if isinstance(scales, (int, float)):
            scales = [scales]

        # Get image patches
        im_patches = torch.cat(
            [sample_patch(im, pos, s * image_sz, image_sz) for s in scales])

        # Compute features
        feature_map = torch.cat(TensorList(
            [f.get_feature(im_patches) for f in self.features]).unroll(),
                                dim=1)

        return feature_map
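
The patches (one per scale) are batched, and the per-extractor feature maps are concatenated along the channel dimension (dim=1). A minimal sketch of that concatenation, with Conv2d layers and sizes assumed purely for illustration:

import torch

# Assumed stand-ins: three scaled patches already resized to a common size,
# and two feature extractors producing 4- and 8-channel maps.
im_patches = torch.rand(3, 3, 288, 288)
extractors = [torch.nn.Conv2d(3, 4, 3, padding=1),
              torch.nn.Conv2d(3, 8, 3, padding=1)]

feature_map = torch.cat([f(im_patches) for f in extractors], dim=1)
print(feature_map.shape)   # torch.Size([3, 12, 288, 288])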
Code example #10
    def __init__(self, training_samples: TensorList, y: TensorList,
                 filter_reg: torch.Tensor, projection_reg, params,
                 sample_weights: TensorList, projection_activation,
                 response_activation):
        self.training_samples = training_samples
        self.y = y.variable()
        self.filter_reg = filter_reg
        self.sample_weights = sample_weights
        self.params = params
        self.projection_reg = projection_reg
        self.projection_activation = projection_activation
        self.response_activation = response_activation

        self.diag_M = self.filter_reg.concat(projection_reg)
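
diag_M stacks the filter and projection regularization terms into a single list for the optimizer. A minimal sketch with assumed values, using plain list concatenation as a stand-in for TensorList.concat:

import torch

# Assumed per-filter and per-projection regularization values.
filter_reg = [torch.tensor(1e-1), torch.tensor(1e-2)]
projection_reg = [torch.tensor(1e-4), torch.tensor(1e-4)]

diag_M = filter_reg + projection_reg   # list concatenation standing in for TensorList.concat
print(len(diag_M))                     # 4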
Code example #11
    def extract(self, im: torch.Tensor, flag: str):
        im = im / 255
        # The input may arrive as an autograd Variable (pytorch 0.3.1); if the
        # in-place subtraction fails, fall back to the underlying tensor.
        try:
            im -= self.mean
        except Exception:
            im = im.data
            im -= self.mean
        im /= self.std

        if self.use_gpu:
            im = im.cuda()

        # torch.no_grad() is not available in pytorch 0.3.1
        output_features = self.net.extract_features(Variable(im),
                                                    self.feature_layers, flag)

        # Store the raw resnet features which are input to iounet
        self.iounet_backbone_features = TensorList([
            output_features[layer].clone()
            for layer in self.iounet_feature_layers
        ])

        # Store the processed features from iounet, just before pooling
        # with torch.no_grad():  # pytorch0.3.1
        if flag == "RGB":
            self.iounet_features = TensorList(
                self.rgb_bb_regressor.get_iou_feat(
                    self.iounet_backbone_features))
        elif flag == "T":
            self.iounet_features = TensorList(
                self.t_bb_regressor.get_iou_feat(
                    self.iounet_backbone_features))
        else:
            raise ValueError("no this flag, please choose RGB or T")

        return TensorList(
            [output_features[layer] for layer in self.output_layers])
Code example #12
    def extract(self, im, pos, scales, image_sz, flag):
        """Extract features.
        args:
            im: Image.
            pos: Center position for extraction.
            scales: Image scales to extract features from.
            image_sz: Size to resize the image samples to before extraction.
            flag: Modality flag forwarded to each feature extractor.
        """
        if isinstance(scales, (int, float)):
            scales = [scales]

        # Get image patches
        im_patches = torch.cat(
            [sample_patch(im, pos, s * image_sz, image_sz) for s in scales])

        # Compute features
        feature_map = TensorList(
            [f.get_feature(im_patches, flag) for f in self.features]).unroll()

        return feature_map
Code example #13
 def dim(self):
     return TensorList([
         f.dim() for f in self.features if self._return_feature(f)
     ]).unroll()
Code example #14
 def size(self, input_sz):
     return TensorList([
         f.size(input_sz) for f in self.features if self._return_feature(f)
     ]).unroll()
Code example #15
 def stride(self):
     return torch.Tensor(
         TensorList([
             f.stride() for f in self.features if self._return_feature(f)
         ]).unroll())
Code example #16
 def stride(self):
     return TensorList([
         s * self.layer_stride[l]
         for l, s in zip(self.output_layers, self.pool_stride)
     ])
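
Here the total stride of each output layer is the backbone layer's stride multiplied by any additional pooling stride. A quick worked example with assumed layer names and stride values:

layer_stride = {'layer2': 8, 'layer3': 16}   # assumed backbone strides
output_layers = ['layer2', 'layer3']
pool_stride = [2, 1]                         # assumed extra pooling per layer

print([s * layer_stride[l] for l, s in zip(output_layers, pool_stride)])   # [16, 16]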
Code example #17
 def dim(self):
     return TensorList([self.layer_dim[l] for l in self.output_layers])