Example #1
 # CSE_block: a squeeze-and-excitation (SE) style residual block.
 def __init__(self,
              in_channels,
              out_channels,
              keys='BRCBRC',
              stride=1,
              nobias=False,
              conv_keys='',
              depthrate=0,
              se_ratio=16,
              **dic):
     super(CSE_block, self).__init__()
     se_channels = out_channels // se_ratio
     with self.init_scope():
         self.res = Module(in_channels, out_channels, keys, stride, nobias,
                           conv_keys, depthrate, **dic)
         self.ide = Module(in_channels, out_channels, 'I', stride, nobias,
                           conv_keys, depthrate, **dic)
         self.rSE = Module(out_channels, se_channels, 'PcR', 1, nobias,
                           conv_keys, depthrate, **dic)
         self.iSE = Module(in_channels, se_channels, 'PcR', 1, nobias,
                           conv_keys, depthrate, **dic)
         self.SE = Module(se_channels * 2,
                          out_channels,
                          'cS',
                          1,
                          nobias,
                          conv_keys,
                          depthrate,
                           S=S,  # `S` is defined elsewhere in the original source
                          **dic)
     self.out_channels = out_channels
     self.channels = [
         m.channels
         for m in (self.res, self.ide, self.rSE, self.iSE, self.SE)
     ]
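
A quick sanity check of the channel arithmetic above (the widths used here are hypothetical; only the integer math is taken from the listing):

# With hypothetical widths 64 -> 128 and the default se_ratio=16:
in_channels, out_channels, se_ratio = 64, 128, 16
se_channels = out_channels // se_ratio        # 128 // 16 = 8
# rSE and iSE each emit se_channels maps, so self.SE consumes their concatenation:
assert se_channels * 2 == 16
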
 # Encoder: a chain of Blocks, with optional join/transition Modules inserted between stages.
 def __init__(self,
              Ns,
              in_channels,
              rules_channels,
              keys='I+BRCBRC',
              keys_join='',
              rule_channels_join=None,
              strides=(1, 2, 2),
              nobias=False,
              conv_keys='',
              **dic):
     super(Encoder, self).__init__()
     rules_channels = force_tuple(rules_channels, len(Ns))
     if not isinstance(rule_channels_join, RuleChannels):
         rule_channels_join = StaticChannels(rule_channels_join)
     dr = DepthRate(sum(Ns))
     for N, rule, stride in zip(Ns, rules_channels, strides):
         if len(self) > 0:
             out_channels = rule_channels_join(in_channels)
             self.append(
                 Module(in_channels, out_channels, keys_join, 1, nobias,
                        conv_keys, dr(0), **dic))
             in_channels = self[-1].out_channels
         self.append(
             Block(N, in_channels, rule, keys, stride, nobias, conv_keys,
                   dr, **dic))
         in_channels = self[-1].out_channels
         dr.add_offset(N)
     self.out_channels = in_channels
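
A minimal usage sketch for this constructor, using only arguments visible in the signature above (the Block/Module/RuleChannels helpers are assumed to come from the same package):

# Hypothetical call: three stages of three blocks each, widening 16 -> 32 -> 64
# and downsampling between stages via strides=(1, 2, 2).
encoder = Encoder(Ns=(3, 3, 3),
                  in_channels=16,
                  rules_channels=(16, 32, 64),
                  strides=(1, 2, 2))
# encoder.out_channels then reflects the last stage's channel rule.
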
 # ResNet: stem Module, main Encoder, head Module, then a linear classifier.
 def __init__(self, classes, Ns=(3,) * 3, channels=(16, 32, 64),
              firsts='CBR', mains='I+BRCBRC', lasts='BRP',
              keys_join='A', rule_channels_join=None, strides=(1, 1, 1),
              nobias=False, conv_keys='', **dic):
     super(ResNet, self).__init__()
     # Module before main.
     self.append(Module(None, channels[0], firsts, 1, True, conv_keys,
                        **dic))
     # main networks.
     self.append(Encoder(Ns, self[-1].out_channels, channels, mains,
                         keys_join, rule_channels_join, strides, nobias,
                         conv_keys, **dic))
     # Module after main.
     self.append(Module(self[-1].out_channels, None, lasts, 1, nobias,
                        conv_keys, **dic))
     # Classifier.
     self.append(L.Linear(classes))
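
Assuming these classes live in a Chainer model file, a plain instantiation sketch (not part of the original listing) could look like this; the forward pass itself is defined elsewhere:

import numpy as np
import chainer

# Hypothetical: the defaults give a CIFAR-sized ResNet (Ns=(3, 3, 3), channels 16/32/64).
model = ResNet(classes=10)
x = np.zeros((1, 3, 32, 32), dtype=np.float32)   # dummy CIFAR-shaped input
with chainer.using_config('train', False):
    y = model(x)   # assumes the class defines forward/__call__ elsewhere
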
 # PyramidNet: same layout as ResNet, but block widths follow a DynamicRatioChannels rule.
 def __init__(self, classes, Ns=(18,) * 3, first_channels=16, alpha=48,
              firsts='CBR', mains='I+BCBRCB', lasts='BRP',
              keys_join='A', rule_channels_join=None, strides=(1, 1, 1),
              nobias=False, conv_keys='', **dic):
     super(PyramidNet, self).__init__()
     rule_channels = DynamicRatioChannels(alpha, sum(Ns))
     # Module before main.
     self.append(Module(None, first_channels, firsts, 1, True, conv_keys,
                        **dic))
     # main networks.
     self.append(Encoder(Ns, self[-1].out_channels, rule_channels, mains,
                         keys_join, rule_channels_join, strides, nobias,
                         conv_keys, **dic))
     # Module after main.
     self.append(Module(self[-1].out_channels, None, lasts, 1, nobias,
                        conv_keys, **dic))
     # Classifier.
     self.append(L.Linear(classes))
 # DenseNet: same layout, with growth-rate style widths and channel-compressing join Modules.
 def __init__(self, classes, Ns=(12,) * 3, first_channels=16, channels=12,
              firsts='CBR', mains='I,BRC', lasts='BRP',
              keys_join='BRcA', trans_theta=1, strides=(1, 1, 1),
              nobias=True, conv_keys='', **dic):
     super(DenseNet, self).__init__()
     rule_channels_join = DynamicChannels(trans_theta)
     # Module before main.
     self.append(Module(None, first_channels, firsts, 1, True, conv_keys,
                        **dic))
     # main networks.
     self.append(Encoder(Ns, self[-1].out_channels, channels, mains,
                         keys_join, rule_channels_join, strides, nobias,
                         conv_keys, **dic))
     # Module after main.
     self.append(Module(self[-1].out_channels, None, lasts, 1, nobias,
                        conv_keys, **dic))
     # Classifier.
     self.append(L.Linear(classes))
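
The three network constructors above differ mainly in how per-block widths are chosen; a side-by-side sketch (only the constructor arguments are taken from the listing, the comments are interpretation):

# ResNet:     fixed stage widths, channels=(16, 32, 64).
# PyramidNet: widths driven by DynamicRatioChannels(alpha, sum(Ns)).
# DenseNet:   each Module is built with a fixed width of `channels` (growth-rate
#             style), and the join Modules use DynamicChannels(trans_theta).
cifar_resnet = ResNet(classes=10)
cifar_pyramidnet = PyramidNet(classes=10, alpha=48)
cifar_densenet = DenseNet(classes=10, channels=12)
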
 # Shuffle_v2_block: ShuffleNet-v2 style block; only part of the input goes through the conv branch.
 def __init__(self, in_channels, out_channels, conv_ratio=0.5,
              keys='BRCBRC', stride=1, nobias=False,
              conv_keys='', depth_ratio=0, **dic):
     super(Shuffle_v2_block, self).__init__()
     self.copy = (out_channels > in_channels)
     if self.copy:
         self.conv_in = in_channels
         conv_out = out_channels - in_channels
     else:
         self.conv_in = int(in_channels * conv_ratio)
         conv_out = out_channels - in_channels + self.conv_in
     with self.init_scope():
         self.convs = Module(self.conv_in, conv_out, keys, stride, nobias,
                             conv_keys, depth_ratio, **dic)
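
The channel split above is easiest to see with concrete numbers (a sketch only; the actual split/concat happens in the forward pass, which is not shown here):

# Case 1: out_channels > in_channels -> the whole input feeds the conv branch,
#         and `self.copy` marks that the input is also kept as-is.
in_c, out_c = 64, 128
conv_in, conv_out = in_c, out_c - in_c                 # 64, 64

# Case 2: out_channels <= in_channels -> split by conv_ratio.
in_c, out_c, conv_ratio = 64, 64, 0.5
conv_in = int(in_c * conv_ratio)                       # 32
conv_out = out_c - in_c + conv_in                      # 32
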
 # Block: N stacked Modules; only the first one uses the given stride.
 def __init__(self,
              N,
              in_channels,
              rule_channels,
              keys='I+BRCBRC',
              stride=1,
              nobias=False,
              conv_keys='',
              dr=DepthRate(1),
              **dic):
     super(Block, self).__init__()
     if not isinstance(rule_channels, RuleChannels):
         rule_channels = StaticChannels(rule_channels)
     for i in six.moves.range(N):
         out_channels = rule_channels(in_channels)
         self.append(
             Module(in_channels, out_channels, keys, stride, nobias,
                    conv_keys, dr(i), **dic))
         stride = 1
         in_channels = self[-1].out_channels
     self.out_channels = in_channels
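
A short sketch of what the loop above builds (names from the listing, numbers assumed):

# Hypothetical: three residual Modules, 16 -> 32 channels, downsampling once.
block = Block(N=3, in_channels=16, rule_channels=32, stride=2)
# Only the first Module receives stride=2; the loop then resets stride to 1,
# and each later Module consumes the previous Module's out_channels.
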
 # FishNet: tail / body / head Modules plus a final linear classifier.
 def __init__(self,
              classes=10,
              ch0=16,
              firsts='CBR',
              mains='X+BRCBRC',
              bottoms='BRPc',
              lasts='BRP',
              nobias=False,
              **dic):
     super(FishNet, self).__init__()
     with self.init_scope():
         self.f0 = Module(None, ch0, firsts, 1, nobias, **dic)
         # tail
         self.t0 = Module(ch0, ch0, mains, 1, nobias, **dic)
         self.t1 = Module(ch0, ch0 * 2, mains, 1, nobias, **dic)
         self.t2 = Module(ch0 * 2, ch0 * 4, mains, 1, nobias, **dic)
         self.t3 = Module(ch0 * 4, ch0 * 4, bottoms, 1, nobias, **dic)
         # body
         self.b2 = Module(ch0 * 8, ch0 * 4, mains, 1, nobias, **dic)
         self.b1 = Module(ch0 * 6, ch0 * 3, mains, 1, nobias, **dic)
         self.b0 = Module(ch0 * 4, ch0 * 2, mains, 1, nobias, **dic)
         # head
         self.h1 = Module(ch0 * 5, ch0 * 5, mains, 1, nobias, **dic)
         self.h2 = Module(ch0 * 9, ch0 * 9, mains, 1, nobias, **dic)
         self.h3 = Module(ch0 * 9, ch0 * 9, lasts, 1, nobias, **dic)
         # linear
         self.fin = L.Linear(classes)
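
Reading the constants off the constructor with the default ch0=16 gives these widths (the actual tail/body/head wiring lives in the forward pass, which is not shown here):

ch0 = 16
tail_out = [ch0, ch0 * 2, ch0 * 4, ch0 * 4]   # t0..t3: 16, 32, 64, 64
body_out = [ch0 * 4, ch0 * 3, ch0 * 2]        # b2, b1, b0: 64, 48, 32
head_out = [ch0 * 5, ch0 * 9, ch0 * 9]        # h1, h2, h3: 80, 144, 144
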
# Helper factories; `s` and `S` are not defined in this snippet and come from
# an enclosing scope in the original source.
def e(_self):
    return Module(_self.ch, _self.ch, 'PsRcS', s=s, S=S)

def E(_self):
    return Module(_self.ch, _self.ch, 'I*PsRcS', s=s, S=S)