Code Example #1
File: test_loss.py Project: taofuyu/mmdetection
def test_classification_losses(loss_class, input_shape):
    if input_shape[0] == 0 and digit_version(
            torch.__version__) < digit_version('1.5.0'):
        pytest.skip(
            f'CELoss in PyTorch {torch.__version__} does not support empty '
            f'tensor.')

    pred = torch.rand(input_shape)
    target = torch.randint(0, 5, (input_shape[0], ))

    # Test loss forward
    loss = loss_class()(pred, target)
    assert isinstance(loss, torch.Tensor)

    # Test loss forward with reduction_override
    loss = loss_class()(pred, target, reduction_override='mean')
    assert isinstance(loss, torch.Tensor)

    # Test loss forward with avg_factor
    loss = loss_class()(pred, target, avg_factor=10)
    assert isinstance(loss, torch.Tensor)

    with pytest.raises(ValueError):
        # loss can evaluate with avg_factor only if
        # reduction is None, 'none' or 'mean'.
        reduction_override = 'sum'
        loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)

    # Test loss forward with avg_factor and reduction
    for reduction_override in [None, 'none', 'mean']:
        loss = loss_class()(
            pred, target, avg_factor=10, reduction_override=reduction_override)
        assert isinstance(loss, torch.Tensor)
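
This test is driven by pytest parametrization that the snippet above omits; a sketch of the decorators that would typically sit above it (the concrete loss class and shapes are illustrative placeholders, not necessarily the exact ones used in mmdetection):

import pytest
import torch
from mmdet.models.losses import CrossEntropyLoss

@pytest.mark.parametrize('loss_class', [CrossEntropyLoss])
@pytest.mark.parametrize('input_shape', [(10, 5), (0, 5)])  # includes the empty batch handled by the skip above
def test_classification_losses(loss_class, input_shape):
    ...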
Code Example #2
    def test_single_gpu(self, _):
        common_cfg = dict(dataset=self.data,
                          samples_per_gpu=self.samples_per_gpu,
                          workers_per_gpu=self.workers_per_gpu,
                          dist=False)

        # Test default config
        dataloader = build_dataloader(**common_cfg)

        if digit_version(torch.__version__) >= digit_version('1.8.0'):
            assert dataloader.persistent_workers
        elif hasattr(dataloader, 'persistent_workers'):
            assert not dataloader.persistent_workers

        assert dataloader.batch_size == self.samples_per_gpu
        assert dataloader.num_workers == self.workers_per_gpu
        assert not all(
            torch.cat(list(iter(dataloader))) == torch.tensor(self.data))

        # Test without shuffle
        dataloader = build_dataloader(**common_cfg, shuffle=False)
        assert all(
            torch.cat(list(iter(dataloader))) == torch.tensor(self.data))

        # Test with custom sampler_cfg
        dataloader = build_dataloader(**common_cfg,
                                      sampler_cfg=dict(type='RepeatAugSampler',
                                                       selected_round=0),
                                      shuffle=False)
        expect = [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6]
        assert all(torch.cat(list(iter(dataloader))) == torch.tensor(expect))
Code Example #3
    def test_distributed(self, _):
        common_cfg = dict(
            dataset=self.data,
            samples_per_gpu=self.samples_per_gpu,
            workers_per_gpu=self.workers_per_gpu,
            num_gpus=2,  # num_gpus will be ignored in distributed environment.
            dist=True)

        # Test default config
        dataloader = build_dataloader(**common_cfg)

        if digit_version(torch.__version__) >= digit_version('1.8.0'):
            assert dataloader.persistent_workers
        elif hasattr(dataloader, 'persistent_workers'):
            assert not dataloader.persistent_workers

        assert dataloader.batch_size == self.samples_per_gpu
        assert dataloader.num_workers == self.workers_per_gpu
        non_expect = torch.tensor(self.data[1::2])
        assert not all(torch.cat(list(iter(dataloader))) == non_expect)

        # Test without shuffle
        dataloader = build_dataloader(**common_cfg, shuffle=False)
        expect = torch.tensor(self.data[1::2])
        assert all(torch.cat(list(iter(dataloader))) == expect)

        # Test with custom sampler_cfg
        dataloader = build_dataloader(**common_cfg,
                                      sampler_cfg=dict(type='RepeatAugSampler',
                                                       selected_round=0),
                                      shuffle=False)
        expect = torch.tensor(
            [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6][1::2])
        assert all(torch.cat(list(iter(dataloader))) == expect)
Code Example #4
    def __init__(self,
                 embed_dims,
                 num_heads,
                 num_frames,
                 attn_drop=0.,
                 proj_drop=0.,
                 dropout_layer=dict(type='DropPath', drop_prob=0.1),
                 norm_cfg=dict(type='LN'),
                 init_cfg=None,
                 **kwargs):
        super().__init__(init_cfg)
        self.embed_dims = embed_dims
        self.num_heads = num_heads
        self.num_frames = num_frames
        self.norm = build_norm_layer(norm_cfg, self.embed_dims)[1]

        if digit_version(torch.__version__) < digit_version('1.9.0'):
            kwargs.pop('batch_first', None)
        self.attn = nn.MultiheadAttention(embed_dims, num_heads, attn_drop,
                                          **kwargs)
        self.proj_drop = nn.Dropout(proj_drop)
        self.dropout_layer = build_dropout(
            dropout_layer) if dropout_layer else nn.Identity()
        self.temporal_fc = nn.Linear(self.embed_dims, self.embed_dims)

        self.init_weights()
Code Example #5
def test_load_url():
    url1 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.5.pth'
    url2 = 'https://download.openmmlab.com/mmcv/test_data/saved_in_pt1.6.pth'

    # The 1.6 release of PyTorch switched torch.save to use a new zipfile-based
    # file format. It will cause RuntimeError when a checkpoint was saved in
    # torch >= 1.6.0 but loaded in torch < 1.7.0.
    # More details at https://github.com/open-mmlab/mmpose/issues/904
    if digit_version(TORCH_VERSION) < digit_version('1.7.0'):
        model_zoo.load_url(url1)
        with pytest.raises(RuntimeError):
            model_zoo.load_url(url2)
    else:
        # high version of PyTorch can load checkpoints from url, regardless
        # of which version they were saved in
        model_zoo.load_url(url1)
        model_zoo.load_url(url2)

    load_url(url1)
    # if a checkpoint was saved in torch >= 1.6.0 but loaded in torch < 1.5.0,
    # it will raise a RuntimeError
    if digit_version(TORCH_VERSION) < digit_version('1.5.0'):
        with pytest.raises(RuntimeError):
            load_url(url2)
    else:
        load_url(url2)
Code Example #6
File: fp16_utils.py Project: v-qjqs/mmcv
def wrap_fp16_model(model):
    """Wrap the FP32 model to FP16.

    If you are using PyTorch >= 1.6, torch.cuda.amp is used as the
    backend; otherwise, the original mmcv implementation is adopted.

    For PyTorch >= 1.6, this function will
    1. Set the fp16 flag inside the model to True.

    Otherwise, it will
    1. Convert the FP32 model to FP16.
    2. Keep some necessary layers (e.g., normalization layers) in FP32.
    3. Set the `fp16_enabled` flag inside the model to True.

    Args:
        model (nn.Module): Model in FP32.
    """
    if (TORCH_VERSION == 'parrots'
            or digit_version(TORCH_VERSION) < digit_version('1.6.0')):
        # convert model to fp16
        model.half()
        # patch the normalization layers to make it work in fp32 mode
        patch_norm_fp32(model)
    # set `fp16_enabled` flag
    for m in model.modules():
        if hasattr(m, 'fp16_enabled'):
            m.fp16_enabled = True
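
A minimal usage sketch of the helper above (the toy model is only a placeholder; wrap_fp16_model is importable from mmcv.runner):

import torch.nn as nn
from mmcv.runner import wrap_fp16_model

model = nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8), nn.ReLU())
wrap_fp16_model(model)
# On torch < 1.6 the weights are now FP16 and the BatchNorm layer has been
# patched back to FP32; on torch >= 1.6 the function only sets `fp16_enabled`
# on submodules that already define that attribute and leaves the actual
# casting to torch.cuda.amp.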
Code Example #7
    def test_mdconv(self):
        self._test_mdconv(torch.double)
        self._test_mdconv(torch.float)
        self._test_mdconv(torch.half)

        # test amp when torch version >= '1.6.0', the type of
        # input data for mdconv might be torch.float or torch.half
        if (TORCH_VERSION != 'parrots'
                and digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
            with autocast(enabled=True):
                self._test_amp_mdconv(torch.float)
                self._test_amp_mdconv(torch.half)
Code Example #8
File: test_deform_conv.py Project: OceanPang/mmcv
    def test_deformconv(self):
        self._test_deformconv(torch.double, device='cpu')
        self._test_deformconv(torch.float, device='cpu', threshold=1e-1)
        self._test_deformconv(torch.double)
        self._test_deformconv(torch.float)
        self._test_deformconv(torch.half, threshold=1e-1)

        # test amp when torch version >= '1.6.0', the type of
        # input data for deformconv might be torch.float or torch.half
        if (TORCH_VERSION != 'parrots'
                and digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
            with autocast(enabled=True):
                self._test_amp_deformconv(torch.float, 1e-1)
                self._test_amp_deformconv(torch.half, 1e-1)
Code Example #9
    def _sync_params(self):
        module_states = list(self.module.state_dict().values())
        if len(module_states) > 0:
            self._dist_broadcast_coalesced(module_states,
                                           self.broadcast_bucket_size)
        if self.broadcast_buffers:
            if (TORCH_VERSION != 'parrots'
                    and digit_version(TORCH_VERSION) < digit_version('1.0')):
                buffers = [b.data for b in self.module._all_buffers()]
            else:
                buffers = [b.data for b in self.module.buffers()]
            if len(buffers) > 0:
                self._dist_broadcast_coalesced(buffers,
                                               self.broadcast_bucket_size)
Code Example #10
def is_tracing() -> bool:
    if digit_version(torch.__version__) >= digit_version('1.6.0'):
        on_trace = torch.jit.is_tracing()
        # In PyTorch 1.6, torch.jit.is_tracing has a bug.
        # Refers to https://github.com/pytorch/pytorch/issues/42448
        if isinstance(on_trace, bool):
            return on_trace
        else:
            return torch._C._is_tracing()
    else:
        warnings.warn(
            'torch.jit.is_tracing is only supported after v1.6.0. '
            'Therefore is_tracing returns False automatically. Please '
            'set on_trace manually if you are using trace.', UserWarning)
        return False
Code Example #11
def get_dist_info():
    if (TORCH_VERSION != 'parrots'
            and digit_version(TORCH_VERSION) < digit_version('1.0')):
        initialized = dist._initialized
    else:
        if dist.is_available():
            initialized = dist.is_initialized()
        else:
            initialized = False
    if initialized:
        rank = dist.get_rank()
        world_size = dist.get_world_size()
    else:
        rank = 0
        world_size = 1
    return rank, world_size
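
A common pattern built on the helper above is to restrict side effects such as logging or checkpointing to rank 0 (a sketch; the message is arbitrary):

rank, world_size = get_dist_info()
if rank == 0:
    # only the first process writes logs / checkpoints
    print(f'world_size={world_size}, running as rank {rank}')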
Code Example #12
    def forward(self, x):
        # pre-context
        avg_x = F.adaptive_avg_pool2d(x, output_size=1)
        avg_x = self.pre_context(avg_x)
        avg_x = avg_x.expand_as(x)
        x = x + avg_x
        # switch
        avg_x = F.pad(x, pad=(2, 2, 2, 2), mode='reflect')
        avg_x = F.avg_pool2d(avg_x, kernel_size=5, stride=1, padding=0)
        switch = self.switch(avg_x)
        # sac
        weight = self._get_weight(self.weight)
        zero_bias = torch.zeros(self.out_channels,
                                device=weight.device,
                                dtype=weight.dtype)

        if self.use_deform:
            offset = self.offset_s(avg_x)
            out_s = deform_conv2d(x, offset, weight, self.stride, self.padding,
                                  self.dilation, self.groups, 1)
        else:
            if (TORCH_VERSION == 'parrots'
                    or digit_version(TORCH_VERSION) < digit_version('1.5.0')):
                out_s = super().conv2d_forward(x, weight)
            elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'):
                # bias is a required argument of _conv_forward in torch 1.8.0
                out_s = super()._conv_forward(x, weight, zero_bias)
            else:
                out_s = super()._conv_forward(x, weight)
        ori_p = self.padding
        ori_d = self.dilation
        self.padding = tuple(3 * p for p in self.padding)
        self.dilation = tuple(3 * d for d in self.dilation)
        weight = weight + self.weight_diff
        if self.use_deform:
            offset = self.offset_l(avg_x)
            out_l = deform_conv2d(x, offset, weight, self.stride, self.padding,
                                  self.dilation, self.groups, 1)
        else:
            if (TORCH_VERSION == 'parrots'
                    or digit_version(TORCH_VERSION) < digit_version('1.5.0')):
                out_l = super().conv2d_forward(x, weight)
            elif digit_version(TORCH_VERSION) >= digit_version('1.8.0'):
                # bias is a required argument of _conv_forward in torch 1.8.0
                out_l = super()._conv_forward(x, weight, zero_bias)
            else:
                out_l = super()._conv_forward(x, weight)

        out = switch * out_s + (1 - switch) * out_l
        self.padding = ori_p
        self.dilation = ori_d
        # post-context
        avg_x = F.adaptive_avg_pool2d(out, output_size=1)
        avg_x = self.post_context(avg_x)
        avg_x = avg_x.expand_as(out)
        out = out + avg_x
        return out
Code Example #13
File: distributed.py Project: www516717402/mmcv
    def train_step(self, *inputs, **kwargs):
        """train_step() API for module wrapped by DistributedDataParallel.

        This method is basically the same as
        ``DistributedDataParallel.forward()``, while replacing
        ``self.module.forward()`` with ``self.module.train_step()``.
        It is compatible with PyTorch 1.1 - 1.5.
        """

        # In PyTorch >= 1.7, ``reducer._rebuild_buckets()`` is moved from the
        # end of backward to the beginning of forward.
        if ('parrots' not in TORCH_VERSION
                and digit_version(TORCH_VERSION) >= digit_version('1.7')
                and self.reducer._rebuild_buckets()):
            print_log(
                'Reducer buckets have been rebuilt in this iteration.',
                logger='mmcv')

        if ('parrots' not in TORCH_VERSION
                and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
            if self._check_sync_bufs_pre_fwd():
                self._sync_buffers()
        else:
            if (getattr(self, 'require_forward_param_sync', False)
                    and self.require_forward_param_sync):
                self._sync_params()

        if self.device_ids:
            inputs, kwargs = self.scatter(inputs, kwargs, self.device_ids)
            if len(self.device_ids) == 1:
                output = self.module.train_step(*inputs[0], **kwargs[0])
            else:
                outputs = self.parallel_apply(
                    self._module_copies[:len(inputs)], inputs, kwargs)
                output = self.gather(outputs, self.output_device)
        else:
            output = self.module.train_step(*inputs, **kwargs)

        if ('parrots' not in TORCH_VERSION
                and digit_version(TORCH_VERSION) >= digit_version('1.11.0')):
            if self._check_sync_bufs_post_fwd():
                self._sync_buffers()

        if (torch.is_grad_enabled()
                and getattr(self, 'require_backward_grad_sync', False)
                and self.require_backward_grad_sync):
            if self.find_unused_parameters:
                self.reducer.prepare_for_backward(list(_find_tensors(output)))
            else:
                self.reducer.prepare_for_backward([])
        else:
            if ('parrots' not in TORCH_VERSION
                    and digit_version(TORCH_VERSION) > digit_version('1.2')):
                self.require_forward_param_sync = False
        return output
Code Example #14
    def import_wandb(self):
        try:
            import wandb
            from wandb import init  # noqa

            # Fix ResourceWarning when calling wandb.log in wandb v0.12.10.
            # https://github.com/wandb/client/issues/2837
            if digit_version(wandb.__version__) < digit_version('0.12.10'):
                warnings.warn(
                    f'The current wandb {wandb.__version__} is '
                    f'lower than v0.12.10 and will cause ResourceWarning '
                    f'when calling wandb.log. Please run '
                    f'"pip install --upgrade wandb"')

        except ImportError:
            raise ImportError(
                'Please run "pip install "wandb>=0.12.10"" to install wandb')
        self.wandb = wandb
Code Example #15
File: fp16_utils.py Project: v-qjqs/mmcv
        def new_func(*args, **kwargs):
            # check if the module has set the attribute `fp16_enabled`, if not,
            # just fallback to the original method.
            if not isinstance(args[0], torch.nn.Module):
                raise TypeError('@auto_fp16 can only be used to decorate the '
                                'method of nn.Module')
            if not (hasattr(args[0], 'fp16_enabled') and args[0].fp16_enabled):
                return old_func(*args, **kwargs)

            # get the arg spec of the decorated method
            args_info = getfullargspec(old_func)
            # get the argument names to be cast
            args_to_cast = args_info.args if apply_to is None else apply_to
            # convert the args that need to be processed
            new_args = []
            # NOTE: default args are not taken into consideration
            if args:
                arg_names = args_info.args[:len(args)]
                for i, arg_name in enumerate(arg_names):
                    if arg_name in args_to_cast:
                        new_args.append(
                            cast_tensor_type(args[i], torch.float, torch.half))
                    else:
                        new_args.append(args[i])
            # convert the kwargs that need to be processed
            new_kwargs = {}
            if kwargs:
                for arg_name, arg_value in kwargs.items():
                    if arg_name in args_to_cast:
                        new_kwargs[arg_name] = cast_tensor_type(
                            arg_value, torch.float, torch.half)
                    else:
                        new_kwargs[arg_name] = arg_value
            # apply converted arguments to the decorated method
            if (TORCH_VERSION != 'parrots' and
                    digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
                with autocast(enabled=True):
                    output = old_func(*new_args, **new_kwargs)
            else:
                output = old_func(*new_args, **new_kwargs)
            # cast the results back to fp32 if necessary
            if out_fp32:
                output = cast_tensor_type(output, torch.half, torch.float)
            return output
Code Example #16
    def test_deformconv(self):
        self._test_deformconv(torch.double, device='cpu')
        self._test_deformconv(torch.float, device='cpu', threshold=1e-1)
        self._test_deformconv(torch.double)
        self._test_deformconv(torch.float)
        self._test_deformconv(torch.half, threshold=1e-1)
        # test batch_size < im2col_step
        self._test_deformconv(torch.float, batch_size=1, im2col_step=2)
        # test batch_size % im2col_step != 0
        with pytest.raises(
                AssertionError,
                match='batch size must be divisible by im2col_step'):
            self._test_deformconv(torch.float, batch_size=10, im2col_step=3)

        # test amp when torch version >= '1.6.0', the type of
        # input data for deformconv might be torch.float or torch.half
        if (TORCH_VERSION != 'parrots'
                and digit_version(TORCH_VERSION) >= digit_version('1.6.0')):
            with autocast(enabled=True):
                self._test_amp_deformconv(torch.float, 1e-1)
                self._test_amp_deformconv(torch.half, 1e-1)
Code Example #17
    def before_run(self, runner):
        super(TensorboardLoggerHook, self).before_run(runner)
        if (TORCH_VERSION == 'parrots'
                or digit_version(TORCH_VERSION) < digit_version('1.1')):
            try:
                from tensorboardX import SummaryWriter
            except ImportError:
                raise ImportError('Please install tensorboardX to use '
                                  'TensorboardLoggerHook.')
        else:
            try:
                from torch.utils.tensorboard import SummaryWriter
            except ImportError:
                raise ImportError(
                    'Please run "pip install future tensorboard" to install '
                    'the dependencies to use torch.utils.tensorboard '
                    '(applicable to PyTorch 1.1 or higher)')

        if self.log_dir is None:
            self.log_dir = osp.join(runner.work_dir, 'tf_logs')
        self.writer = SummaryWriter(self.log_dir)
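
For context, this hook is normally enabled from an OpenMMLab config rather than constructed by hand; a sketch of a typical log_config entry (the interval value is arbitrary):

log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        dict(type='TensorboardLoggerHook'),
    ])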
Code Example #18
    def __init__(self,
                 in_channels,
                 out_channels,
                 inner_channels,
                 deform_groups=17,
                 dilations=(3, 6, 12, 18, 24),
                 trans_conv_kernel=1,
                 res_blocks_cfg=None,
                 offsets_kernel=3,
                 deform_conv_kernel=3,
                 in_index=0,
                 input_transform=None,
                 freeze_trans_layer=True,
                 norm_eval=False,
                 im2col_step=80):
        super().__init__()
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.inner_channels = inner_channels
        self.deform_groups = deform_groups
        self.dilations = dilations
        self.trans_conv_kernel = trans_conv_kernel
        self.res_blocks_cfg = res_blocks_cfg
        self.offsets_kernel = offsets_kernel
        self.deform_conv_kernel = deform_conv_kernel
        self.in_index = in_index
        self.input_transform = input_transform
        self.freeze_trans_layer = freeze_trans_layer
        self.norm_eval = norm_eval
        self.im2col_step = im2col_step

        identity_trans_layer = False

        assert trans_conv_kernel in [0, 1, 3]
        kernel_size = trans_conv_kernel
        if kernel_size == 3:
            padding = 1
        elif kernel_size == 1:
            padding = 0
        else:
            # 0 for Identity mapping.
            identity_trans_layer = True

        if identity_trans_layer:
            self.trans_layer = nn.Identity()
        else:
            self.trans_layer = build_conv_layer(cfg=dict(type='Conv2d'),
                                                in_channels=in_channels,
                                                out_channels=out_channels,
                                                kernel_size=kernel_size,
                                                stride=1,
                                                padding=padding)

        # build chain of residual blocks
        if res_blocks_cfg is not None and not isinstance(res_blocks_cfg, dict):
            raise TypeError('res_blocks_cfg should be dict or None.')

        if res_blocks_cfg is None:
            block_type = 'BASIC'
            num_blocks = 20
        else:
            block_type = res_blocks_cfg.get('block', 'BASIC')
            num_blocks = res_blocks_cfg.get('num_blocks', 20)

        block = self.blocks_dict[block_type]

        res_layers = []
        downsample = nn.Sequential(
            build_conv_layer(cfg=dict(type='Conv2d'),
                             in_channels=out_channels,
                             out_channels=inner_channels,
                             kernel_size=1,
                             stride=1,
                             bias=False),
            build_norm_layer(dict(type='BN'), inner_channels)[1])
        res_layers.append(
            block(in_channels=out_channels,
                  out_channels=inner_channels,
                  downsample=downsample))

        for _ in range(1, num_blocks):
            res_layers.append(block(inner_channels, inner_channels))
        self.offset_feats = nn.Sequential(*res_layers)

        # build offset layers
        self.num_offset_layers = len(dilations)
        assert self.num_offset_layers > 0, 'Number of offset layers ' \
            'should be larger than 0.'

        target_offset_channels = 2 * offsets_kernel**2 * deform_groups

        offset_layers = [
            build_conv_layer(
                cfg=dict(type='Conv2d'),
                in_channels=inner_channels,
                out_channels=target_offset_channels,
                kernel_size=offsets_kernel,
                stride=1,
                dilation=dilations[i],
                padding=dilations[i],
                bias=False,
            ) for i in range(self.num_offset_layers)
        ]
        self.offset_layers = nn.ModuleList(offset_layers)

        # build deformable conv layers
        assert digit_version(mmcv.__version__) >= \
            digit_version(self.minimum_mmcv_version), \
            f'Current MMCV version: {mmcv.__version__}, ' \
            f'but MMCV >= {self.minimum_mmcv_version} is required, see ' \
            f'https://github.com/open-mmlab/mmcv/issues/1440, ' \
            f'Please install the latest MMCV.'

        if has_mmcv_full:
            deform_conv_layers = [
                DeformConv2d(
                    in_channels=out_channels,
                    out_channels=out_channels,
                    kernel_size=deform_conv_kernel,
                    stride=1,
                    padding=int(deform_conv_kernel / 2) * dilations[i],
                    dilation=dilations[i],
                    deform_groups=deform_groups,
                    im2col_step=self.im2col_step,
                ) for i in range(self.num_offset_layers)
            ]
        else:
            raise ImportError('Please install the full version of mmcv '
                              'to use `DeformConv2d`.')

        self.deform_conv_layers = nn.ModuleList(deform_conv_layers)

        self.freeze_layers()
Code Example #19
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch

from mmcv.utils import digit_version, is_jit_tracing


@pytest.mark.skipif(digit_version(torch.__version__) < digit_version('1.6.0'),
                    reason='torch.jit.is_tracing is not available before 1.6.0'
                    )
def test_is_jit_tracing():
    def foo(x):
        if is_jit_tracing():
            return x
        else:
            return x.tolist()

    x = torch.rand(3)
    # test without trace
    assert isinstance(foo(x), list)

    # test with trace
    traced_foo = torch.jit.trace(foo, (torch.rand(1), ))
    assert isinstance(traced_foo(x), torch.Tensor)
Code Example #20
File: parrots_wrapper.py Project: hhaAndroid/mmcv
# Copyright (c) OpenMMLab. All rights reserved.
from functools import partial

import torch

from mmcv.utils import digit_version

TORCH_VERSION = torch.__version__

is_rocm_pytorch = False
if (TORCH_VERSION != 'parrots'
        and digit_version(TORCH_VERSION) >= digit_version('1.5')):
    from torch.utils.cpp_extension import ROCM_HOME
    is_rocm_pytorch = True if ((torch.version.hip is not None) and
                               (ROCM_HOME is not None)) else False


def _get_cuda_home():
    if TORCH_VERSION == 'parrots':
        from parrots.utils.build_extension import CUDA_HOME
    else:
        if is_rocm_pytorch:
            from torch.utils.cpp_extension import ROCM_HOME
            CUDA_HOME = ROCM_HOME
        else:
            from torch.utils.cpp_extension import CUDA_HOME
    return CUDA_HOME


def get_build_config():
    if TORCH_VERSION == 'parrots':
Code Example #21
File: builder.py Project: plyfager/mmgeneration
def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     persistent_workers=False,
                     **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU, i.e.,
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        persistent_workers (bool, optional): If True, the data loader will
            not shut down the worker processes after a dataset has been
            consumed once. This keeps the worker Dataset instances alive.
            The argument only takes effect with PyTorch>=1.7.0.
            Default: False.
        kwargs: any keyword argument to be used to initialize DataLoader

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    if dist:
        sampler = DistributedSampler(dataset,
                                     world_size,
                                     rank,
                                     shuffle=shuffle,
                                     samples_per_gpu=samples_per_gpu,
                                     seed=seed)
        shuffle = False
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = None
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    init_fn = partial(
        worker_init_fn, num_workers=num_workers, rank=rank,
        seed=seed) if seed is not None else None

    if (digit_version(TORCH_VERSION) >= digit_version('1.7.0')
            and TORCH_VERSION != 'parrots'):
        kwargs['persistent_workers'] = persistent_workers
    elif persistent_workers is True:
        warnings.warn('persistent_workers is invalid because your pytorch '
                      'version is lower than 1.7.0')

    data_loader = DataLoader(dataset,
                             batch_size=batch_size,
                             sampler=sampler,
                             num_workers=num_workers,
                             collate_fn=partial(
                                 collate, samples_per_gpu=samples_per_gpu),
                             shuffle=shuffle,
                             worker_init_fn=init_fn,
                             **kwargs)

    return data_loader
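
A minimal non-distributed usage sketch of the function above (the TensorDataset is only a stand-in for a real dataset):

import torch
from torch.utils.data import TensorDataset

toy_dataset = TensorDataset(torch.arange(8).float())
loader = build_dataloader(
    toy_dataset,
    samples_per_gpu=2,
    workers_per_gpu=0,
    dist=False,
    persistent_workers=False)  # True only takes effect on torch >= 1.7
for batch in loader:
    pass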
Code Example #22
File: builder.py Project: MakeCent/mmaction2
def build_dataloader(dataset,
                     videos_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     seed=None,
                     drop_last=False,
                     pin_memory=True,
                     persistent_workers=False,
                     **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (:obj:`Dataset`): A PyTorch dataset.
        videos_per_gpu (int): Number of videos on each GPU, i.e.,
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data
            loading for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed
            training. Default: 1.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        seed (int | None): Seed to be used. Default: None.
        drop_last (bool): Whether to drop the last incomplete batch in epoch.
            Default: False
        pin_memory (bool): Whether to use pin_memory in DataLoader.
            Default: True
        persistent_workers (bool): If True, the data loader will not shut
            down the worker processes after a dataset has been consumed once.
            This keeps the worker Dataset instances alive.
            The argument only takes effect with PyTorch>=1.8.0.
            Default: False
        kwargs (dict, optional): Any keyword argument to be used to initialize
            DataLoader.

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()
    sample_by_class = getattr(dataset, 'sample_by_class', False)

    if dist:
        if sample_by_class:
            dynamic_length = getattr(dataset, 'dynamic_length', True)
            sampler = ClassSpecificDistributedSampler(
                dataset,
                world_size,
                rank,
                dynamic_length=dynamic_length,
                shuffle=shuffle,
                seed=seed)
        else:
            sampler = DistributedSampler(
                dataset, world_size, rank, shuffle=shuffle, seed=seed)
        shuffle = False
        batch_size = videos_per_gpu
        num_workers = workers_per_gpu
    else:
        sampler = None
        batch_size = num_gpus * videos_per_gpu
        num_workers = num_gpus * workers_per_gpu

    init_fn = partial(
        worker_init_fn, num_workers=num_workers, rank=rank,
        seed=seed) if seed is not None else None

    if digit_version(torch.__version__) >= digit_version('1.8.0'):
        kwargs['persistent_workers'] = persistent_workers

    data_loader = DataLoader(
        dataset,
        batch_size=batch_size,
        sampler=sampler,
        num_workers=num_workers,
        collate_fn=partial(collate, samples_per_gpu=videos_per_gpu),
        pin_memory=pin_memory,
        shuffle=shuffle,
        worker_init_fn=init_fn,
        drop_last=drop_last,
        **kwargs)

    return data_loader
Code Example #23
def test_digit_version():
    assert digit_version('0.2.16') == (0, 2, 16, 0, 0, 0)
    assert digit_version('1.2.3') == (1, 2, 3, 0, 0, 0)
    assert digit_version('1.2.3rc0') == (1, 2, 3, 0, -1, 0)
    assert digit_version('1.2.3rc1') == (1, 2, 3, 0, -1, 1)
    assert digit_version('1.0rc0') == (1, 0, 0, 0, -1, 0)
    assert digit_version('1.0') == digit_version('1.0.0')
    assert digit_version('1.5.0+cuda90_cudnn7.6.3_lms') == digit_version('1.5')
    assert digit_version('1.0.0dev') < digit_version('1.0.0a')
    assert digit_version('1.0.0a') < digit_version('1.0.0a1')
    assert digit_version('1.0.0a') < digit_version('1.0.0b')
    assert digit_version('1.0.0b') < digit_version('1.0.0rc')
    assert digit_version('1.0.0rc1') < digit_version('1.0.0')
    assert digit_version('1.0.0') < digit_version('1.0.0post')
    assert digit_version('1.0.0post') < digit_version('1.0.0post1')
    assert digit_version('v1') == (1, 0, 0, 0, 0, 0)
    assert digit_version('v1.1.5') == (1, 1, 5, 0, 0, 0)
    with pytest.raises(AssertionError):
        digit_version('a')
    with pytest.raises(AssertionError):
        digit_version('1x')
    with pytest.raises(AssertionError):
        digit_version('1.x')
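
As the assertions show, digit_version turns a version string into a fixed-length tuple, which is why it is used for the version gates throughout these examples instead of comparing raw strings; a small sketch of the difference:

from mmcv.utils import digit_version

digit_version('1.10.0')                           # (1, 10, 0, 0, 0, 0)
digit_version('1.9.0') < digit_version('1.10.0')  # True
'1.9.0' < '1.10.0'                                # False: plain string comparison gets this wrong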
Code Example #24
try:
    from mmcv.utils import digit_version
except ImportError:

    def digit_version(version_str):
        digit_ver = []
        for x in version_str.split('.'):
            if x.isdigit():
                digit_ver.append(int(x))
            elif x.find('rc') != -1:
                patch_version = x.split('rc')
                digit_ver.append(int(patch_version[0]) - 1)
                digit_ver.append(int(patch_version[1]))
        return digit_ver


MMCV_MIN = '1.3'
MMCV_MAX = '1.5'

mmcv_min_version = digit_version(MMCV_MIN)
mmcv_max_version = digit_version(MMCV_MAX)
mmcv_version = digit_version(mmcv.__version__)


assert (mmcv_min_version <= mmcv_version <= mmcv_max_version), \
    f'MMCV=={mmcv.__version__} is used but incompatible. ' \
    f'Please install mmcv>={mmcv_min_version}, <={mmcv_max_version}.'

__all__ = ['__version__', 'version_info']
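
Note how the fallback above handles release candidates: the digit before 'rc' is lowered by one, so a pre-release still sorts below the final release (a small sketch; mmcv's own digit_version yields the same ordering via its 6-tuple form):

# With the fallback: '1.5rc1' -> [1, 4, 1] and '1.5' -> [1, 5]
assert digit_version('1.5rc1') < digit_version('1.5')
# the same comparison idiom as the MMCV_MIN/MMCV_MAX bound check above
assert digit_version('1.3') <= digit_version('1.4.2') <= digit_version('1.5')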
Code Example #25
File: test_conv_module.py Project: www516717402/mmcv
def test_conv_module():
    with pytest.raises(AssertionError):
        # conv_cfg must be a dict or None
        conv_cfg = 'conv'
        ConvModule(3, 8, 2, conv_cfg=conv_cfg)

    with pytest.raises(AssertionError):
        # norm_cfg must be a dict or None
        norm_cfg = 'norm'
        ConvModule(3, 8, 2, norm_cfg=norm_cfg)

    with pytest.raises(KeyError):
        # softmax is not supported
        act_cfg = dict(type='softmax')
        ConvModule(3, 8, 2, act_cfg=act_cfg)

    # conv + norm + act
    conv = ConvModule(3, 8, 2, norm_cfg=dict(type='BN'))
    assert conv.with_activation
    assert hasattr(conv, 'activate')
    assert conv.with_norm
    assert hasattr(conv, 'norm')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert output.shape == (1, 8, 255, 255)

    # conv + act
    conv = ConvModule(3, 8, 2)
    assert conv.with_activation
    assert hasattr(conv, 'activate')
    assert not conv.with_norm
    assert conv.norm is None
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert output.shape == (1, 8, 255, 255)

    # conv
    conv = ConvModule(3, 8, 2, act_cfg=None)
    assert not conv.with_norm
    assert conv.norm is None
    assert not conv.with_activation
    assert not hasattr(conv, 'activate')
    x = torch.rand(1, 3, 256, 256)
    output = conv(x)
    assert output.shape == (1, 8, 255, 255)

    # conv with its own `init_weights` method
    conv_module = ConvModule(3,
                             8,
                             2,
                             conv_cfg=dict(type='ExampleConv'),
                             act_cfg=None)
    assert torch.equal(conv_module.conv.conv0.weight, torch.zeros(8, 3, 2, 2))

    # with_spectral_norm=True
    conv = ConvModule(3, 8, 3, padding=1, with_spectral_norm=True)
    assert hasattr(conv.conv, 'weight_orig')
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # padding_mode='reflect'
    conv = ConvModule(3, 8, 3, padding=1, padding_mode='reflect')
    assert isinstance(conv.padding_layer, nn.ReflectionPad2d)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # non-existing padding mode
    with pytest.raises(KeyError):
        conv = ConvModule(3, 8, 3, padding=1, padding_mode='non_exists')

    # leaky relu
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='LeakyReLU'))
    assert isinstance(conv.activate, nn.LeakyReLU)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # tanh
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='Tanh'))
    assert isinstance(conv.activate, nn.Tanh)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # Sigmoid
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='Sigmoid'))
    assert isinstance(conv.activate, nn.Sigmoid)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # PReLU
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='PReLU'))
    assert isinstance(conv.activate, nn.PReLU)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # HSwish
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='HSwish'))
    if (TORCH_VERSION == 'parrots'
            or digit_version(TORCH_VERSION) < digit_version('1.7')):
        assert isinstance(conv.activate, HSwish)
    else:
        assert isinstance(conv.activate, nn.Hardswish)

    output = conv(x)
    assert output.shape == (1, 8, 256, 256)

    # HSigmoid
    conv = ConvModule(3, 8, 3, padding=1, act_cfg=dict(type='HSigmoid'))
    assert isinstance(conv.activate, HSigmoid)
    output = conv(x)
    assert output.shape == (1, 8, 256, 256)
Code Example #26
def build_dataloader(dataset,
                     samples_per_gpu,
                     workers_per_gpu,
                     num_gpus=1,
                     dist=True,
                     shuffle=True,
                     round_up=True,
                     seed=None,
                     pin_memory=True,
                     persistent_workers=True,
                     sampler_cfg=None,
                     **kwargs):
    """Build PyTorch DataLoader.

    In distributed training, each GPU/process has a dataloader.
    In non-distributed training, there is only one dataloader for all GPUs.

    Args:
        dataset (Dataset): A PyTorch dataset.
        samples_per_gpu (int): Number of training samples on each GPU, i.e.,
            batch size of each GPU.
        workers_per_gpu (int): How many subprocesses to use for data loading
            for each GPU.
        num_gpus (int): Number of GPUs. Only used in non-distributed training.
        dist (bool): Distributed training/test or not. Default: True.
        shuffle (bool): Whether to shuffle the data at every epoch.
            Default: True.
        round_up (bool): Whether to round up the length of dataset by adding
            extra samples to make it evenly divisible. Default: True.
        pin_memory (bool): Whether to use pin_memory in DataLoader.
            Default: True
        persistent_workers (bool): If True, the data loader will not shut
            down the worker processes after a dataset has been consumed once.
            This keeps the worker Dataset instances alive.
            The argument only takes effect with PyTorch>=1.8.0 (matching the
            version check below). Default: True
        sampler_cfg (dict): sampler configuration to override the default
            sampler
        kwargs: any keyword argument to be used to initialize DataLoader

    Returns:
        DataLoader: A PyTorch dataloader.
    """
    rank, world_size = get_dist_info()

    # Custom sampler logic
    if sampler_cfg:
        # shuffle=False when val and test
        sampler_cfg.update(shuffle=shuffle)
        sampler = build_sampler(sampler_cfg,
                                default_args=dict(dataset=dataset,
                                                  num_replicas=world_size,
                                                  rank=rank))
    # Default sampler logic
    elif dist:
        sampler = build_sampler(
            dict(type='DistributedSampler',
                 dataset=dataset,
                 num_replicas=world_size,
                 rank=rank,
                 shuffle=shuffle,
                 round_up=round_up))
    else:
        sampler = None

    # If sampler exists, turn off dataloader shuffle
    if sampler is not None:
        shuffle = False

    if dist:
        batch_size = samples_per_gpu
        num_workers = workers_per_gpu
    else:
        batch_size = num_gpus * samples_per_gpu
        num_workers = num_gpus * workers_per_gpu

    init_fn = partial(
        worker_init_fn, num_workers=num_workers, rank=rank,
        seed=seed) if seed is not None else None

    if digit_version(torch.__version__) >= digit_version('1.8.0'):
        kwargs['persistent_workers'] = persistent_workers

    data_loader = DataLoader(dataset,
                             batch_size=batch_size,
                             sampler=sampler,
                             num_workers=num_workers,
                             collate_fn=partial(
                                 collate, samples_per_gpu=samples_per_gpu),
                             pin_memory=pin_memory,
                             shuffle=shuffle,
                             worker_init_fn=init_fn,
                             **kwargs)

    return data_loader
Code Example #27
# Copyright (c) OpenMMLab. All rights reserved.
import os
import random

import numpy as np
import torch

from mmcv.runner import set_random_seed
from mmcv.utils import TORCH_VERSION, digit_version

is_rocm_pytorch = False
if digit_version(TORCH_VERSION) >= digit_version('1.5'):
    from torch.utils.cpp_extension import ROCM_HOME
    is_rocm_pytorch = True if ((torch.version.hip is not None) and
                               (ROCM_HOME is not None)) else False


def test_set_random_seed():
    set_random_seed(0)
    a_random = random.randint(0, 10)
    a_np_random = np.random.rand(2, 2)
    a_torch_random = torch.rand(2, 2)
    assert torch.backends.cudnn.deterministic is False
    assert torch.backends.cudnn.benchmark is False
    assert os.environ['PYTHONHASHSEED'] == str(0)

    set_random_seed(0, True)
    b_random = random.randint(0, 10)
    b_np_random = np.random.rand(2, 2)
    b_torch_random = torch.rand(2, 2)
    assert torch.backends.cudnn.deterministic is True
Code Example #28
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import pytest
from mmcv.utils import assert_dict_has_keys, digit_version

try:
    import torch

    from mmaction.datasets.pipelines import PytorchVideoTrans
    pytorchvideo_ok = False
    if digit_version(torch.__version__) >= digit_version('1.8.0'):
        pytorchvideo_ok = True
except (ImportError, ModuleNotFoundError):
    pytorchvideo_ok = False


@pytest.mark.skipif(not pytorchvideo_ok, reason='torch >= 1.8.0 is required')
class TestPytorchVideoTrans:
    @staticmethod
    def test_pytorchvideo_trans():
        with pytest.raises(AssertionError):
            # transforms not supported in pytorchvideo
            PytorchVideoTrans(type='BlaBla')

        with pytest.raises(AssertionError):
            # This trans exists in pytorchvideo but not supported in MMAction2
            PytorchVideoTrans(type='MixUp')

        target_keys = ['imgs']

        imgs = list(np.random.randint(0, 256, (4, 32, 32, 3)).astype(np.uint8))
Code Example #29
    def __init__(self,
                 arch='768/32',
                 in_channels=3,
                 norm_cfg=dict(type='BN'),
                 act_cfg=dict(type='GELU'),
                 out_indices=-1,
                 frozen_stages=0,
                 init_cfg=None):
        super().__init__(init_cfg=init_cfg)

        if isinstance(arch, str):
            assert arch in self.arch_settings, \
                f'Unavailable arch, please choose from ' \
                f'({set(self.arch_settings)}) or pass a dict.'
            arch = self.arch_settings[arch]
        elif isinstance(arch, dict):
            essential_keys = {
                'embed_dims', 'depth', 'patch_size', 'kernel_size'
            }
            assert isinstance(arch, dict) and essential_keys <= set(arch), \
                f'Custom arch needs a dict with keys {essential_keys}'

        self.embed_dims = arch['embed_dims']
        self.depth = arch['depth']
        self.patch_size = arch['patch_size']
        self.kernel_size = arch['kernel_size']
        self.act = build_activation_layer(act_cfg)

        # check out indices and frozen stages
        if isinstance(out_indices, int):
            out_indices = [out_indices]
        assert isinstance(out_indices, Sequence), \
            f'"out_indices" must by a sequence or int, ' \
            f'get {type(out_indices)} instead.'
        for i, index in enumerate(out_indices):
            if index < 0:
                out_indices[i] = self.depth + index
                assert out_indices[i] >= 0, f'Invalid out_indices {index}'
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages

        # Set stem layers
        self.stem = nn.Sequential(
            nn.Conv2d(in_channels,
                      self.embed_dims,
                      kernel_size=self.patch_size,
                      stride=self.patch_size), self.act,
            build_norm_layer(norm_cfg, self.embed_dims)[1])

        # Set conv2d according to torch version: nn.Conv2d only accepts the
        # string padding 'same' from torch 1.9 on, so fall back to
        # Conv2dAdaptivePadding for older versions.
        convfunc = nn.Conv2d
        if digit_version(torch.__version__) < digit_version('1.9.0'):
            convfunc = Conv2dAdaptivePadding

        # Repetitions of ConvMixer Layer
        self.stages = nn.Sequential(*[
            nn.Sequential(
                Residual(
                    nn.Sequential(
                        convfunc(self.embed_dims,
                                 self.embed_dims,
                                 self.kernel_size,
                                 groups=self.embed_dims,
                                 padding='same'), self.act,
                        build_norm_layer(norm_cfg, self.embed_dims)[1])),
                nn.Conv2d(self.embed_dims, self.embed_dims, kernel_size=1),
                self.act,
                build_norm_layer(norm_cfg, self.embed_dims)[1])
            for _ in range(self.depth)
        ])

        self._freeze_stages()
Code Example #30
File: activation.py Project: www516717402/mmcv
        - Output: :math:`(N, *)`, same shape as the input

    .. image:: scripts/activation_images/GELU.png

    Examples::

        >>> m = nn.GELU()
        >>> input = torch.randn(2)
        >>> output = m(input)
    """
    def forward(self, input):
        return F.gelu(input)


if (TORCH_VERSION == 'parrots'
        or digit_version(TORCH_VERSION) < digit_version('1.4')):
    ACTIVATION_LAYERS.register_module(module=GELU)
else:
    ACTIVATION_LAYERS.register_module(module=nn.GELU)


def build_activation_layer(cfg):
    """Build activation layer.

    Args:
        cfg (dict): The activation layer config, which should contain:

            - type (str): Layer type.
            - layer args: Args needed to instantiate an activation layer.

    Returns: