Example #1
# Module-level names referenced by this snippet (the Timer path matches the other
# examples below; F follows the usual torchvision convention; PRINT_TIME is a
# debug flag assumed to default to False).
from torchvision.transforms import functional as F

from disprcnn.utils.timer import Timer

PRINT_TIME = False


class ToTensor(object):
    def __init__(self):
        self.timer = Timer()

    def call_single_view(self, image, target):
        return F.to_tensor(image), target

    def call_double_view(self, image, target):
        self.timer.tic()
        left_image, right_image = image['left'], image['right']
        left_target, right_target = target['left'], target['right']
        left_image = F.to_tensor(left_image)
        right_image = F.to_tensor(right_image)
        image = {'left': left_image, 'right': right_image}
        target = {'left': left_target, 'right': right_target}
        self.timer.toc()
        if PRINT_TIME:
            print('totensor', self.timer.average_time)
        return image, target

    def __call__(self, image, target):
        # dict inputs carry 'left'/'right' views (a stereo pair); anything else is single-view
        if isinstance(image, dict) and isinstance(target, dict):
            return self.call_double_view(image, target)
        else:
            return self.call_single_view(image, target)
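
# --- Usage sketch (illustrative, not from the source module): how the __call__
# dispatch above behaves for single-view vs. dict inputs. Assumes the imports and
# PRINT_TIME flag added at the top of this example are in scope.
from PIL import Image

to_tensor = ToTensor()

# Single view: a plain PIL image goes straight through F.to_tensor.
img = Image.new('RGB', (64, 48))
tensor_img, tgt = to_tensor(img, None)  # tensor_img is a float tensor of shape [3, 48, 64]

# Double view: dict inputs with 'left'/'right' keys trigger call_double_view.
pair = {'left': Image.new('RGB', (64, 48)), 'right': Image.new('RGB', (64, 48))}
pair_tgt = {'left': None, 'right': None}
tensor_pair, pair_tgt = to_tensor(pair, pair_tgt)  # both views converted to tensors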
Example #2
    def __init__(self, min_size, max_size):
        if not isinstance(min_size, (list, tuple)):
            min_size = (min_size, )
        self.min_size = min_size
        self.max_size = max_size
        self.getsizetimer = Timer()
        self.imgtimer = Timer()
        self.tgttimer = Timer()
Example #3
class Normalize(object):
    def __init__(self, mean, std, to_bgr255=True):
        self.mean = mean
        self.std = std
        self.to_bgr255 = to_bgr255
        self.timer = Timer()

    def call_single_view(self, image, target):
        # if self.to_bgr255:
        #     image = image[[2, 1, 0]] * 255
        image = F.normalize(image, mean=self.mean, std=self.std)
        return image, target

    def call_double_view(self, image, target):
        self.timer.tic()
        left_image, right_image = image['left'], image['right']
        left_target, right_target = target['left'], target['right']
        if self.to_bgr255:
            left_image = left_image[[2, 1, 0]] * 255
            right_image = right_image[[2, 1, 0]] * 255
        # left_image -= self.mean
        # right_image -= self.mean
        left_image = F.normalize(left_image, mean=self.mean, std=self.std)
        right_image = F.normalize(right_image, mean=self.mean, std=self.std)
        image = {'left': left_image, 'right': right_image}
        target = {'left': left_target, 'right': right_target}
        self.timer.toc()
        if PRINT_TIME:
            print('normalize', self.timer.average_time)
        return image, target

    def __call__(self, image, target):
        if isinstance(image, dict) and isinstance(target, dict):
            return self.call_double_view(image, target)
        else:
            return self.call_single_view(image, target)
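
# --- Usage sketch (illustrative, not from the source module): exercising the
# double-view path above. Assumes F (torchvision.transforms.functional), Timer and
# PRINT_TIME are in scope as module globals; the mean/std below are illustrative
# BGR-order, 0-255-scale values, not values read from this repo's config.
import torch

normalize = Normalize(mean=[102.9801, 115.9465, 122.7717],
                      std=[1.0, 1.0, 1.0], to_bgr255=True)

# Inputs are CHW float tensors in [0, 1] (e.g. ToTensor output). With to_bgr255=True
# the channels are reordered RGB->BGR and scaled to [0, 255] before F.normalize
# subtracts the mean and divides by the std per channel.
left, right = torch.rand(3, 48, 64), torch.rand(3, 48, 64)
image, target = normalize({'left': left, 'right': right},
                          {'left': None, 'right': None})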
Example #4
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch

from disprcnn.utils.timer import Timer
from .box_head.box_head import build_roi_box_head, ROIBoxHead
from .mask_head.mask_head import build_roi_mask_head, ROIMaskHead

boxheadtimer = Timer()
maskheadtimer = Timer()
PRINTTIME = False


class CombinedROIHeads(torch.nn.ModuleDict):
    """
    Combines a set of individual heads (for box prediction or masks) into a single
    head.
    """
    mask: ROIMaskHead
    box: ROIBoxHead

    def __init__(self, cfg, heads):
        super(CombinedROIHeads, self).__init__(heads)
        self.cfg = cfg.clone()
        if cfg.MODEL.MASK_ON and cfg.MODEL.ROI_MASK_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
            self.mask.feature_extractor = self.box.feature_extractor
        if cfg.MODEL.KEYPOINT_ON and cfg.MODEL.ROI_KEYPOINT_HEAD.SHARE_BOX_FEATURE_EXTRACTOR:
            self.keypoint.feature_extractor = self.box.feature_extractor

    def forward(self, features, proposals, targets=None):
        losses = {}
        # TODO rename x to roi_box_features, if it doesn't increase memory consumption
Example #5
class Resize(object):
    def __init__(self, min_size, max_size):
        if not isinstance(min_size, (list, tuple)):
            min_size = (min_size, )
        self.min_size = min_size
        self.max_size = max_size
        self.getsizetimer = Timer()
        self.imgtimer = Timer()
        self.tgttimer = Timer()
        # self.timer = Timer()

    # modified from torchvision to add support for max size
    def get_size(self, image_size):
        w, h = image_size
        size = random.choice(self.min_size)
        max_size = self.max_size
        if max_size is not None:
            min_original_size = float(min((w, h)))
            max_original_size = float(max((w, h)))
            if max_original_size / min_original_size * size > max_size:
                size = int(
                    round(max_size * min_original_size / max_original_size))

        if (w <= h and w == size) or (h <= w and h == size):
            return (h, w)

        if w < h:
            ow = size
            oh = int(size * h / w)
        else:
            oh = size
            ow = int(size * w / h)

        return (oh, ow)

    def call_single_view(self, image, target):
        # h, w = self.get_size(image.shape[:-1][::-1])
        # image = cv2.resize(image, None, None, fx=h / image.shape[0],
        #                    fy=h / image.shape[0], interpolation=cv2.INTER_LINEAR)
        self.getsizetimer.tic()
        h, w = self.get_size(image.size)
        self.getsizetimer.toc()
        if PRINT_TIME:
            print('getsize', self.getsizetimer.average_time)
        self.imgtimer.tic()
        image = image.resize((w, h))
        self.imgtimer.toc()
        if PRINT_TIME:
            print('img resize', self.imgtimer.average_time)
        self.tgttimer.tic()
        target = target.resize(image.size)
        self.tgttimer.toc()
        if PRINT_TIME:
            print('tgt resize', self.tgttimer.average_time)
        return image, target

    def call_double_view(self, image, target):
        # self.timer.tic()
        left_image, right_image = image['left'], image['right']
        left_target, right_target = target['left'], target['right']
        left_image, left_target = self.call_single_view(
            left_image, left_target)
        right_image, right_target = self.call_single_view(
            right_image, right_target)
        image = {'left': left_image, 'right': right_image}
        target = {'left': left_target, 'right': right_target}
        # self.timer.toc()
        # print('resize', self.timer.average_time)
        return image, target

    def __call__(self, image, target):
        if isinstance(image, dict) and isinstance(target, dict):
            return self.call_double_view(image, target)
        else:
            return self.call_single_view(image, target)
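
# --- Usage sketch (illustrative, not from the source module): the get_size
# arithmetic above on a 1242x375 (KITTI-like) image with illustrative
# min_size=800, max_size=1333. Assumes random, Timer and PRINT_TIME are in scope.
resize = Resize(min_size=800, max_size=1333)  # min_size is wrapped into the tuple (800,)

# 1242 / 375 * 800 = 2649.6 > 1333, so the short side is capped:
# size = round(1333 * 375 / 1242) = 402, and the long side becomes
# int(402 * 1242 / 375) = 1331, which stays within max_size.
oh, ow = resize.get_size((1242, 375))  # takes (w, h), returns (h, w) -> (402, 1331)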
Example #6
    def __init__(self, mean, std, to_bgr255=True):
        self.mean = mean
        self.std = std
        self.to_bgr255 = to_bgr255
        self.timer = Timer()
Example #7
    def __init__(self):
        self.timer = Timer()
Example #8
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import torch
from torch import nn

from disprcnn.utils.timer import Timer
from .roi_box_feature_extractors import make_roi_box_feature_extractor
from .roi_box_predictors import make_roi_box_predictor
from .inference import make_roi_box_post_processor
from .loss import make_roi_box_loss_evaluator

featuretimer = Timer()
predtimer = Timer()
pptimer = Timer()
PRINTTIME = False


class ROIBoxHead(torch.nn.Module):
    """
    Generic Box Head class.
    """
    def __init__(self, cfg, in_channels):
        super(ROIBoxHead, self).__init__()
        self.feature_extractor = make_roi_box_feature_extractor(
            cfg, in_channels)
        self.predictor = make_roi_box_predictor(
            cfg, self.feature_extractor.out_channels)
        self.post_processor = make_roi_box_post_processor(cfg)
        self.loss_evaluator = make_roi_box_loss_evaluator(cfg)

    def forward_single_view(self, features, proposals, targets=None):
        """