Example #1
 def __init__(self, inplanes, planes, stride=1, downsample=None, alpha=0.5):
     super(BasicBlockIn, self).__init__()
     self.conv1 = OctConv2d(inplanes,
                            planes,
                            kernel_size=3,
                            stride=stride,
                            padding=1,
                            bias=False,
                            alpha=alpha)
     self.bn1 = _InstanceNorm2d(planes,
                                eps=1e-05,
                                momentum=0.1,
                                affine=True)
     self.relu = _ReLU(inplace=True)
     self.conv2 = OctConv2d(planes,
                            planes,
                            kernel_size=3,
                            stride=1,
                            padding=1,
                            bias=False,
                            alpha=alpha)
     self.bn2 = _InstanceNorm2d(planes,
                                eps=1e-05,
                                momentum=0.1,
                                affine=True)
     self.downsample = downsample
     self.stride = stride
Example #2
def conv_dw_plain(inp, oup, stride, dilation=1, alpha=0.5):
  if isinstance(alpha, int) or isinstance(alpha, float):
    alpha1 = alpha2 = alpha
  elif alpha is not None:
    alpha1 = alpha[0]
    alpha2 = alpha[1]
  return nn.Sequential(
    # depthwise 3x3
    OctConv2d(inp, inp, 3, stride, 1 + (dilation > 0) * (dilation - 1), dilation=dilation, groups=inp, bias=False, alpha=alpha1),
    # pointwise 1x1
    OctConv2d(inp, oup, 1, 1, 0, bias=False, alpha=alpha2)
  )
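The padding term in the depthwise convolution above, 1 + (dilation > 0) * (dilation - 1), evaluates to dilation for any dilation >= 1, which is exactly the "same" padding for a 3x3 kernel at stride 1 (output size = input + 2*padding - 2*dilation). A tiny arithmetic check:

for d in (1, 2, 4):
    # padding equal to the dilation keeps the spatial size of a stride-1 3x3 conv
    assert 1 + (d > 0) * (d - 1) == d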
Example #3
def conv_dw_in(inp, oup, stride, dilation=1, alpha=0.5):
  if isinstance(alpha, int) or isinstance(alpha, float):
    alpha1 = alpha2 = alpha
  elif alpha is not None:
    alpha1 = alpha[0]
    alpha2 = alpha[1]
  return nn.Sequential(
    OctConv2d(inp, inp, 3, stride, 1 + (dilation > 0) * (dilation - 1), dilation=dilation, groups=inp, bias=False, alpha=alpha1),
    OctConv2d(inp, oup, 1, 1, 0, bias=False, alpha=alpha2),
    _InstanceNorm2d(oup, eps=1e-05, momentum=0.1),
    _LeakyReLU(inplace=True, negative_slope=0.01),
  )
Example #4
def conv_dw_res(inp, oup, stride, alpha=0.5):
  if isinstance(alpha, int) or isinstance(alpha, float):
    alpha1 = alpha2 = alpha
  elif alpha is not None:
    alpha1 = alpha[0]
    alpha2 = alpha[1]
  return nn.Sequential(
    OctConv2d(inp, inp, 3, stride, 1, groups=inp, bias=False, alpha=alpha1),
    _BatchNorm2d(inp),
    _LeakyReLU(inplace=True, negative_slope=0.01),

    OctConv2d(inp, oup, 1, 1, 0, bias=False, alpha=alpha2),
    _BatchNorm2d(oup),
  )
Example #5
    def __init__(self,
                 num_classes=1000,
                 init_weights=True,
                 color=True,
                 alpha=0):
        super(OctHU, self).__init__()
        self.planes = [16, 32, 64, 128, 256]
        self.alpha = alpha

        self.conv1 = self.make_layers(3 if color else 1,
                                      self.planes[0],
                                      first_layer=True)
        self.conv2 = self.make_layers(self.planes[0], self.planes[1])
        self.conv3 = self.make_layers(self.planes[1], self.planes[2])
        self.conv4 = self.make_layers(self.planes[2], self.planes[3])
        self.conv5 = self.make_layers(self.planes[3], self.planes[4])

        self.conv6 = self.make_layers(self.planes[4] + self.planes[3],
                                      self.planes[3])
        self.conv7 = self.make_layers(self.planes[3] + self.planes[2],
                                      self.planes[2])
        self.conv8 = self.make_layers(self.planes[2] + self.planes[1],
                                      self.planes[1])
        self.conv9 = self.make_layers(self.planes[1] + self.planes[0],
                                      self.planes[0])

        self.conv10 = OctConv2d(self.planes[0],
                                1,
                                kernel_size=3,
                                padding=1,
                                alpha=(self.alpha, 0))

        self.pool = _MaxPool2d(kernel_size=2, stride=2)
Example #6
    def make_layers(self, inplanes, outplanes, first_layer=False):
        layers = []
        first_alpha = self.alpha if not first_layer else (0, self.alpha)
        layers.append(
            OctConv2d(inplanes,
                      outplanes,
                      kernel_size=3,
                      padding=1,
                      alpha=first_alpha))
        layers.append(_ReLU(inplace=True))
        layers.append(
            OctConv2d(outplanes,
                      outplanes,
                      kernel_size=3,
                      padding=1,
                      alpha=self.alpha))
        layers.append(_ReLU(inplace=True))

        return nn.Sequential(*layers)
Example #7
def test_forward_wrong_shapes():
    x_h = torch.rand(2, 3, 200, 200)  # (b, c, h, w)
    x_l = torch.rand(2, 3, 100, 100)  # (b, c, h, w)
    conv = OctConv2d(in_channels=5,
                     out_channels=10,
                     kernel_size=3,
                     alpha=(0.5, 0.5),
                     padding=1)

    # 3 + 3 = 6 input channels in total, but the layer expects in_channels=5
    with pytest.raises(AssertionError):
        _ = conv((x_h, x_l))
Example #8
def conv1x1(in_channels, out_channels, groups=1, alpha=0.5):
    """1x1 convolution with padding
    - Normal pointwise convolution When groups == 1
    - Grouped pointwise convolution when groups > 1
    """
    return OctConv2d(in_channels,
                     out_channels,
                     kernel_size=1,
                     groups=groups,
                     stride=1,
                     alpha=alpha)
Example #9
def test_forward_zero_alpha():
    x = torch.rand(2, 3, 200, 200)  # (b, c, h, w)
    conv1 = OctConv2d(in_channels=3,
                      out_channels=10,
                      kernel_size=3,
                      alpha=0.,
                      padding=1)

    out = conv1(x)

    shape = tuple(out.shape)
    assert shape == (2, 10, 200, 200)
Example #10
def conv3x3(in_channels, out_channels, stride=1, 
          padding=1, bias=True, groups=1, alpha=0.5):    
  """3x3 convolution with padding
  """
  return OctConv2d(
    in_channels, 
    out_channels, 
    kernel_size=3, 
    stride=stride,
    padding=padding,
    bias=bias,
    groups=groups,
    alpha=alpha)
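conv1x1 and conv3x3 are thin wrappers around OctConv2d, so they chain like any other octave layer. A minimal sketch, assuming the helpers from Examples #8 and #10 are defined in the same scope and torch is imported:

import torch

# stem takes a plain tensor (alpha_in = 0) and splits it into a (high, low) pair;
# head merges the pair back into a single tensor (alpha_out = 0)
stem = conv3x3(3, 16, alpha=(0., 0.5))
head = conv1x1(16, 32, alpha=(0.5, 0.))
y = head(stem(torch.rand(1, 3, 64, 64)))
print(y.shape)  # expected: torch.Size([1, 32, 64, 64])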
Example #11
def test_forward_cascade():
    x = torch.rand(2, 3, 200, 200)  # (b, c, h, w)
    conv1 = OctConv2d(in_channels=3,
                      out_channels=10,
                      kernel_size=3,
                      alpha=(0., 0.5),
                      padding=1)
    conv2 = OctConv2d(in_channels=10,
                      out_channels=20,
                      kernel_size=7,
                      alpha=(0.5, 0.8),
                      padding=3)
    conv3 = OctConv2d(in_channels=20,
                      out_channels=1,
                      kernel_size=3,
                      alpha=(0.8, 0.),
                      padding=1)

    out = conv3(conv2(conv1(x)))

    shape = tuple(out.shape)
    assert shape == (2, 1, 200, 200)
Example #12
def test_forward_single_input_stride1():
    x = torch.rand(2, 3, 200, 200)  # (b, c, h, w)
    conv = OctConv2d(in_channels=3,
                     out_channels=10,
                     kernel_size=3,
                     alpha=(0., 0.5),
                     padding=1)
    out_h, out_l = conv(x)

    shape_h = tuple(out_h.shape)
    shape_l = tuple(out_l.shape)

    assert shape_h == (2, 5, 200, 200)
    assert shape_l == (2, 5, 100, 100)
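How the 10 output channels split 5/5 here is controlled by alpha_out; the rounding rule for uneven splits is implementation-specific, so the simplest check is to instantiate a layer and inspect the shapes. A small sketch, assuming the package can be imported as octconv:

import torch
from octconv import OctConv2d  # import path is an assumption

conv = OctConv2d(in_channels=3, out_channels=10, kernel_size=3,
                 alpha=(0., 0.25), padding=1)
out_h, out_l = conv(torch.rand(1, 3, 64, 64))
# the low-frequency branch runs at half the input resolution (32x32)
print(out_h.shape, out_l.shape)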
Example #13
File: layers.py  Project: mrku69/octconv
    def __init__(self, in_channels, out_channels, kernel_size, alpha=0.5, stride=1, padding=0,
                 bias=False, norm_layer=None):

        super(OctConvBn, self).__init__()

        if norm_layer is None:
            norm_layer = nn.BatchNorm2d

        self.conv = OctConv2d(in_channels, out_channels, kernel_size=kernel_size,
                              alpha=alpha, stride=stride, padding=padding, bias=bias)

        alpha_out = self.conv.alpha_out

        self.bn_h = None if alpha_out == 1 else norm_layer(self.conv.out_channels['high'])
        self.bn_l = None if alpha_out == 0 else norm_layer(self.conv.out_channels['low'])
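Because bn_h and bn_l are created only for branches that actually exist, an OctConvBn with alpha_out = 0 ends up with a single BatchNorm on the high-frequency path. A hedged usage sketch, reusing the class above:

block = OctConvBn(64, 128, kernel_size=3, alpha=(0.5, 0.), padding=1)
assert block.bn_l is None        # no low-frequency output to normalize
assert block.bn_h is not None    # BatchNorm2d over the 128 high-frequency channels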
Example #14
def test_forward_split_input():
    x_h = torch.rand(2, 2, 200, 200)  # (b, c, h, w)
    x_l = torch.rand(2, 3, 100, 100)  # (b, c, h, w)
    conv = OctConv2d(in_channels=5,
                     out_channels=10,
                     kernel_size=3,
                     alpha=(0.5, 0.5),
                     padding=1)
    out_h, out_l = conv((x_h, x_l))

    shape_h = tuple(out_h.shape)
    shape_l = tuple(out_l.shape)

    assert shape_h == (2, 5, 200, 200)
    assert shape_l == (2, 5, 100, 100)
Example #15
  def _make_layer(self, block, planes, blocks, stride=1, alpha=0.5):
    downsample = None
    if stride != 1 or self.inplanes != planes * block.expansion:
      downsample = nn.Sequential(
        OctConv2d(self.inplanes, planes * block.expansion,
                  kernel_size=1, stride=stride, bias=False, alpha=alpha),
        _BatchNorm2d(planes * block.expansion, alpha_in=alpha, alpha_out=alpha),
      )

    layers = []
    layers.append(block(self.inplanes, planes, stride, downsample, alpha=alpha))
    self.inplanes = planes * block.expansion
    for i in range(1, blocks):
      layers.append(block(self.inplanes, planes, alpha=alpha))

    return nn.Sequential(*layers)
Example #16
    def __init__(self, inp, oup, stride, expand_ratio, alpha=(0.5, 0.5)):
        super(InvertedResidualOct, self).__init__()
        self.stride = stride
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = self.stride == 1 and inp == oup
        # alpha[0] == 0: the block receives a plain tensor (no low-frequency input yet)
        # alpha[1] == 0: the block must return a plain tensor (low-frequency branch merged back)
        is_initial = alpha[0] == 0
        is_final = alpha[1] == 0

        alpha_1 = alpha_2 = alpha_3 = alpha

        if is_initial:
            # after the first conv splits the features, the remaining convs are fully octave
            alpha_2 = (alpha[1], alpha[1])
            alpha_3 = (alpha[1], alpha[1])

        if is_final:
            # keep the incoming split until the final pointwise conv merges it back
            alpha_1 = (alpha[0], alpha[0])
            alpha_3 = (alpha[1], alpha[1])

        layers = []
        if expand_ratio != 1:
            # pw
            layers.append(OctConvBlock(in_channels=inp, out_channels=hidden_dim,
                        kernel_size=1, stride=1, alpha=alpha_1, padding=0,
                        activation=_LeakyReLU(inplace=True), batch_norm=_BatchNorm2d(hidden_dim, alpha=alpha_1)))
            alpha_1 = alpha_2 if not is_final else alpha_1

        layers.extend([
            # dw
            OctConvBlock(in_channels=hidden_dim, out_channels=hidden_dim,
                    kernel_size=3, stride=stride, alpha=alpha_1, padding=1,
                    groups=True, activation=_LeakyReLU(inplace=True), batch_norm=_BatchNorm2d(hidden_dim, alpha=alpha_1)),
            # pw-linear
            OctConv2d(hidden_dim, oup, kernel_size=1, stride=1, padding=0, alpha=alpha_2),
            _BatchNorm2d(oup, alpha=alpha_3),
        ])
        self.conv = nn.Sequential(*layers)
Example #17
def benchmark_conv():
    x = torch.rand(1, 3, 224, 224)

    conv1 = nn.Conv2d(3, 64, 3)
    conv2 = OctConv2d(3, 64, 3, alpha=(0., 0.5))

    if torch.cuda.is_available():
        x = x.cuda()
        conv1 = conv1.cuda()
        conv2 = conv2.cuda()

    def sync():
        # CUDA kernels launch asynchronously; synchronize so time.time() measures them
        if torch.cuda.is_available():
            torch.cuda.synchronize()

    sync()
    t0 = time.time()
    conv1(x)
    sync()
    t1 = time.time()
    conv2(x)
    sync()
    t2 = time.time()

    conv_time = t1 - t0
    octconv_time = t2 - t1

    print("Conv2D:", conv_time)
    print("OctConv2D:", octconv_time)
    print("ratio (%):", conv_time / octconv_time * 100)
Example #18
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size=3,
              stride=1,
              alpha=(.25, .25),
              padding=0,
              dilation=1,
              groups=False,
              bias=False,
              activation=None,
              batch_norm=None):
     super(OctConvBlock, self).__init__()
     self.conv = OctConv2d(in_channels,
                           out_channels,
                           kernel_size,
                           stride=stride,
                           padding=padding,
                           alpha=alpha,
                           dilation=dilation,
                           groups=groups,
                           bias=bias)
     self.bn = batch_norm
     self.act = activation
Example #19
def _octconv(
        in_channels,  #ip,
        out_channels,  #filters,
        kernel_size=3,  #(3, 3),
        stride=1,  #(1, 1),
        padding='same',
        alpha=(.5, .5),
        dilation=1,
        groups=False,
        bias=False):
    if padding == 'same':
        padding = (kernel_size - 1) // 2
    # an integer padding is passed through unchanged

    return OctConv2d(in_channels,
                     out_channels,
                     kernel_size,
                     stride=stride,
                     padding=padding,
                     alpha=alpha,
                     dilation=dilation,
                     groups=groups,
                     bias=bias)
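With padding='same', the helper sets padding = (kernel_size - 1) // 2, which preserves the spatial size for odd kernels at stride 1 and dilation 1. A quick check of the arithmetic (illustration only):

for k in (1, 3, 5, 7):
    p = (k - 1) // 2
    # output size = input + 2*p - (k - 1), so the difference must be zero
    assert 2 * p - (k - 1) == 0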
Example #20
  def __init__(self, attention=False, multi_scale=True):
    super(OctShuffleMLT, self).__init__()
    
    self.inplanes = 64
    alpha = 0.5
    
    self.layer0 = nn.Sequential(
      OctConv2d(3, 16, 3, stride=1, padding=1, bias=False, alpha=(0, 0.5)),
      CReLU_IN(16),
      OctConv2d(32, 32, 3, stride=2, padding=1, bias=False),
      CReLU_IN(32)
    )
    
    self.layer0_1 = nn.Sequential(
      OctConv2d(64, 64, 3, stride=1, padding=1, bias=False),
      #nn.InstanceNorm2d(64, affine=True),
      _ReLU(),
      OctConv2d(64, 64, 3, stride=2, padding=1, bias=False),
      #nn.InstanceNorm2d(64, affine=True),
      _ReLU(inplace=True)
    )
    
    self.conv5 = OctConv2d(64, 128, 3, padding=1, bias=False)
    self.conv6 = OctConv2d(128, 128, 3, padding=1, bias=False)
    self.conv7 = OctConv2d(128, 256, 3, padding=1, bias=False)
    self.conv8 = OctConv2d(256, 256, 3, padding=1, bias=False)
    self.conv9_1 = OctConv2d(256, 256, 3, padding=1, bias=False)
    self.conv9_2 = OctConv2d(256, 256, 3, padding=1, bias=False, alpha=(0.5, 0))
    self.conv10_s = Conv2d(256, 256, (2, 3), padding=(0, 1), bias=False)
    self.conv11 = Conv2d(256, 106, 1, padding=(0,0))
    
    self.batch5 = _InstanceNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
    self.batch6 = _InstanceNorm2d(128, eps=1e-05, momentum=0.1, affine=True)
    self.batch7 = _InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
    self.batch8 = _InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
    self.batch9 = _InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
    self.batch10_s = InstanceNorm2d(256, eps=1e-05, momentum=0.1, affine=True)
    self.max2_1 = nn.MaxPool2d((2, 1), stride=(2,1))
    self.max2 = _MaxPool2d((2, 1), stride=(2,1))
    self.leaky = _LeakyReLU(negative_slope=0.01, inplace=True)
    self.leaky2 = LeakyReLU(negative_slope=0.01, inplace=True)

    self.groups = 3
    self.stage_out_channels = [-1, 24, 240, 480, 960]
    self.stage_repeats = [3, 7, 3]
    self.layer1 = self._make_layer(BasicBlockIn, 24, 3, stride=1, alpha=alpha)
    self.layer2 = self._make_stage(2)
    self.layer3 = self._make_stage(3)
    self.layer4 = self._make_stage(4)

    self.feature4 = OctConv2d(960, 256, 1, stride=1, padding=0, bias=False, alpha=(0.5, 0))
    self.feature3 = OctConv2d(480, 256, 1, stride=1, padding=0, bias=False, alpha=(0.5, 0))
    self.feature2 = OctConv2d(240, 256, 1, stride=1, padding=0, bias=False, alpha=(0.5, 0))
    
    self.upconv2 = conv_dw_plain(256, 256, stride=1, alpha=0)
    self.upconv1 = conv_dw_plain(256, 256, stride=1, alpha=0)
    
    self.feature1 = OctConv2d(24, 256, 1, stride=1, padding=0, bias=False, alpha=(0.5, 0))
    
    self.act = OctConv2d(256, 1, 1, padding=0, stride=1, alpha=0)
    self.rbox = OctConv2d(256, 4, 1, padding=0, stride=1, alpha=0)
    
    self.angle = OctConv2d(256, 2, 1, padding=0, stride=1, alpha=0)
    self.drop0 = _Dropout2d(p=0.2, inplace=False)
    self.drop1 = Dropout2d(p=0.2, inplace=False)
    
    self.angle_loss = nn.MSELoss(reduction='mean')
    self.h_loss = nn.SmoothL1Loss(reduction='mean')
    self.w_loss = nn.SmoothL1Loss(reduction='mean')
    
    self.attention = attention
  
    if self.attention:
      self.conv_attenton = OctConv2d(256, 1, kernel_size=1, stride=1, padding=0, bias=True, alpha=0) 
    
    self.multi_scale = multi_scale
Example #21
def conv_bn(inp, oup, stride, alpha=0.5):
    return nn.Sequential(
      OctConv2d(inp, oup, 3, stride, 1, bias=False, alpha=alpha),
      _BatchNorm2d(oup),
      _ReLU(inplace=True)
    )