# Example #1
    def __init__(self, num_class):
        """Assemble the Inception-v2 trunk (``base1``/``base2``) and the two
        classifier heads (``aux1``/``aux2``).

        Args:
            num_class: number of output classes for both heads.
        """
        # Stem followed by mixed blocks; explicit channel lists override the
        # block defaults (the defaults live in the block classes).
        self.base1 = rm.Sequential([
            InceptionV2Stem(),
            InceptionV2BlockA([64, 48, 64, 64, 96, 32]),
            InceptionV2BlockA(),
            InceptionV2BlockA(),
            InceptionV2BlockB(),
            InceptionV2BlockC([192, 128, 192, 128, 192, 192]),
            InceptionV2BlockC(),
            InceptionV2BlockC(),
            InceptionV2BlockC()])
        # Auxiliary classifier branching off base1's output.
        self.aux1 = rm.Sequential([
            rm.AveragePool2d(filter=5, stride=3),
            rm.Conv2d(128, filter=1),
            rm.Relu(),
            rm.Conv2d(768, filter=1),
            rm.Relu(),
            rm.Flatten(),
            rm.Dense(num_class)])

        # Remaining reduction/mixed blocks, then global pooling + flatten.
        self.base2 = rm.Sequential([
            InceptionV2BlockD(),
            InceptionV2BlockE(),
            InceptionV2BlockE(),
            rm.AveragePool2d(filter=8),
            rm.Flatten()])

        # Main classifier applied to the flattened base2 features.
        self.aux2 = rm.Dense(num_class)
 def __init__(self,):
     """Discriminator-style CNN for 28x28 inputs: four 3x3 conv stages
     (28->28->14->8->4, channels doubling each stage), a flatten, and a
     single-unit Dense output.
     """
     channel = 8  # base channel count; doubled at each downsampling stage
     self.cnn1 = rm.Sequential([
         # 28x28 -> 28x28
         rm.Conv2d(channel=channel, filter=3, stride=1, padding=1),
         rm.LeakyRelu(),
         rm.Dropout(),
         # 28x28 -> 14x14
         rm.Conv2d(channel=channel * 2, filter=3, stride=2, padding=1),
         rm.LeakyRelu(),
         rm.Dropout(),
         # 14x14 -> 8x8 (padding=2 with stride=2 rounds 14 up to 8)
         rm.Conv2d(channel=channel * 4, filter=3, stride=2, padding=2),
         rm.LeakyRelu(),
         rm.Dropout(),
         # 8x8 -> 4x4
         rm.Conv2d(channel=channel * 8, filter=3, stride=2, padding=1),
         rm.LeakyRelu(),
         rm.Dropout(),
     ])
     # Removed an unused local (intermidiate_dim) and commented-out layers
     # that referenced it; cnn2 only flattens the conv features.
     self.cnn2 = rm.Sequential([
         rm.Flatten(),
     ])
     self.output = rm.Dense(1)
# Example #3
    def __init__(self, num_class):
        """Build a VGG16-backed FCN-16s: five conv blocks, convolutionalized
        fc6/fc7, per-class score layers, and x2 / x16 learned upsamplers.

        Args:
            num_class: number of segmentation classes.
        """
        init_deconv = DeconvInitializer()
        # VGG16 feature extractor; ``first=True`` makes the first block pad
        # by 100 inside layer_factory, per the original FCN recipe.
        self.block1 = layer_factory(channel=64, conv_layer_num=2, first=True)
        self.block2 = layer_factory(channel=128, conv_layer_num=2)
        self.block3 = layer_factory(channel=256, conv_layer_num=3)
        self.block4 = layer_factory(channel=512, conv_layer_num=3)
        self.block5 = layer_factory(channel=512, conv_layer_num=3)

        # fc6/fc7 as 7x7 / 1x1 convolutions (fully-convolutional classifier).
        self.fc6 = rm.Conv2d(4096, filter=7, stride=1, padding=0)
        self.dr1 = rm.Dropout(dropout_ratio=0.5)
        self.fc7 = rm.Conv2d(4096, filter=1, stride=1, padding=0)
        self.dr2 = rm.Dropout(dropout_ratio=0.5)

        self.score_fr = rm.Conv2d(num_class, filter=1, stride=1,
                                  padding=0)  # n_classes
        # 1x1 score layer for the pool4 skip connection.
        self.score_pool4 = rm.Conv2d(num_class, filter=1, padding=0)

        # Learned upsampling via bias-free deconvolutions (x2, then x16).
        self.upscore2 = rm.Deconv2d(num_class,
                                    filter=4,
                                    stride=2,
                                    padding=0,
                                    ignore_bias=True,
                                    initializer=init_deconv)  # n_classes
        self.upscore16 = rm.Deconv2d(num_class,
                                     filter=32,
                                     stride=16,
                                     padding=0,
                                     ignore_bias=True,
                                     initializer=init_deconv)  # n_classes
# Example #4
 def denseblock(
     self,
     dim_v=8,
     dim_h=8,
     input_channels=10,
     dropout=False,
     out_ch=0.5,
 ):
     """Assemble one dense block followed by a 1x1 compression conv.

     Returns a (layer_list, output_channels) pair; the caller wraps the
     list in a Sequential.  NOTE(review): the ``dropout`` argument is
     ignored -- the log lines key off ``self.dropout`` instead, and no
     Dropout layer is actually appended; confirm intent.  ``dim_v`` and
     ``dim_h`` are used only in the log messages.
     """
     layers = []
     channels = input_channels
     print('-> {}'.format(channels))
     # Each iteration adds BN + 3x3 conv and grows the channel count.
     for _ in range(self.depth):
         channels += self.growth_rate
         print('Batch Normalize')
         layers.append(rm.BatchNormalize())
         print(' Conv2d > {}x{} {}ch'.format(dim_v, dim_h,
                                             self.growth_rate))
         layers.append(
             rm.Conv2d(self.growth_rate, filter=3, padding=(1, 1)))
         if self.dropout:
             print(' Dropout')
     # A float out_ch is a compression ratio; an int is an absolute count.
     if isinstance(out_ch, float):
         channels = int(channels * out_ch)
     else:
         channels = out_ch
     print('*Conv2d > {}x{} {}ch'.format(dim_v, dim_h, channels))
     layers.append(rm.Conv2d(channels, filter=1))
     if self.dropout:
         print(' Dropout')
     print(' Average Pooling')
     print('<- {}'.format(channels))
     return layers, channels
# Example #5
def conv_block(growth_rate):
    """DenseNet composite layer: BN-ReLU-Conv(1x1) then BN-ReLU-Conv(3x3).

    The 1x1 bottleneck emits ``4 * growth_rate`` channels; the 3x3 conv
    narrows that back down to ``growth_rate``.
    """
    bottleneck = [
        rm.BatchNormalize(epsilon=0.001, mode='feature'),
        rm.Relu(),
        rm.Conv2d(growth_rate * 4, 1, padding=0),
    ]
    spatial = [
        rm.BatchNormalize(epsilon=0.001, mode='feature'),
        rm.Relu(),
        rm.Conv2d(growth_rate, 3, padding=1),
    ]
    return rm.Sequential(bottleneck + spatial)
# Example #6
    def __init__(self, channels=[64, 96, 384]):
        """Inception reduction block: a 1x1-reduced double-3x3 branch plus a
        direct stride-2 3x3 branch, each conv paired with feature-mode BN.

        NOTE(review): the mutable list default is shared across calls; it
        is never mutated here, so behavior is unaffected.
        """
        # Branch 1: 1x1 reduce -> 3x3 (same padding) -> 3x3 stride 2.
        self.conv1_reduced = rm.Conv2d(channels[0], filter=1)
        self.batch_norm1_reduced = rm.BatchNormalize(mode='feature')
        self.conv1_1 = rm.Conv2d(channels[1], filter=3, padding=1)
        self.batch_norm1_1 = rm.BatchNormalize(mode='feature')
        self.conv1_2 = rm.Conv2d(channels[1], filter=3, stride=2)
        self.batch_norm1_2 = rm.BatchNormalize(mode='feature')

        # Branch 2: single 3x3 stride-2 convolution.
        self.conv2 = rm.Conv2d(channels[2], filter=3, stride=2)
        self.batch_norm2 = rm.BatchNormalize(mode='feature')
 def __init__(self):
     """Small CIFAR-10 CNN: two 32-channel convs, two 64-channel convs
     (filter size left to the Conv2d default -- confirm), a 512-unit
     hidden Dense and a 10-way output, with shared spatial dropout and
     2x2 max-pooling modules.
     """
     super(Cifar10, self).__init__()
     self._l1 = rm.Conv2d(channel=32)
     self._l2 = rm.Conv2d(channel=32)
     self._l3 = rm.Conv2d(channel=64)
     self._l4 = rm.Conv2d(channel=64)
     self._l5 = rm.Dense(512)
     self._l6 = rm.Dense(10)
     # Regularization / pooling modules reused between conv stages.
     self._sd = rm.SpatialDropout(dropout_ratio=0.25)
     self._pool = rm.MaxPool2d(filter=2, stride=2)
# Example #8
def layer_factory(channel=64, conv_layer_num=2, first=None):
    """Build a VGG-style stage: ``conv_layer_num`` x (3x3 conv + ReLU)
    closed by a 2x2 max-pool.

    When ``first`` is not None, the first conv pads by 100 (the FCN input
    trick); every later conv pads by 1.
    """
    layers = []
    big_pad = first is not None
    for _ in range(conv_layer_num):
        layers.append(
            rm.Conv2d(channel=channel, padding=100 if big_pad else 1,
                      filter=3))
        layers.append(rm.Relu())
        big_pad = False  # only the very first conv gets the 100 padding
    # NOTE(review): bare MaxPool2d (no ``rm.`` prefix) -- presumably a
    # module-local pool that supports ceil_mode; confirm it is in scope.
    layers.append(MaxPool2d(filter=2, stride=2, ceil_mode=True))
    return rm.Sequential(layers)
# Example #9
    def __init__(self, channels=[192, 320, 192, 192]):
        """Inception reduction block with two stride-2 branches.

        Branch 1: 1x1 reduce -> 3x3 stride 2.  Branch 2: 1x1 reduce ->
        3x3 (same padding) -> 3x3 stride 2.  Every conv is followed by a
        feature-mode BatchNorm.  NOTE(review): the mutable list default
        is shared across calls but never mutated here.
        """
        self.conv1_reduced = rm.Conv2d(channels[0], filter=1)
        self.batch_norm1_reduced = rm.BatchNormalize(mode='feature')
        self.conv1 = rm.Conv2d(channels[1], filter=3, stride=2)
        self.batch_norm1 = rm.BatchNormalize(mode='feature')

        self.conv2_reduced = rm.Conv2d(channels[2], filter=1)
        self.batch_norm2_reduced = rm.BatchNormalize(mode='feature')
        self.conv2_1 = rm.Conv2d(channels[3], filter=3, padding=1)
        self.batch_norm2_1 = rm.BatchNormalize(mode='feature')
        self.conv2_2 = rm.Conv2d(channels[3], filter=3, stride=2)
        self.batch_norm2_2 = rm.BatchNormalize(mode='feature')
# Example #10
    def __init__(self):
        """Reduction block with fixed channel counts.

        Branch 1: 3x3 stride-2 conv (384 ch).  Branch 2: 1x1 reduce (192)
        -> 3x3 same (224) -> 3x3 stride 2 (256).  BatchNorm follows each
        conv.  The k/l/m/n comment below records the source constants.
        """
        # k, l, m, n
        # 192, 224, 256, 384
        self.conv1 = rm.Conv2d(384, filter=3, stride=2)
        self.batch_norm1 = rm.BatchNormalize(mode='feature')

        self.conv2_red = rm.Conv2d(192, filter=1)
        self.batch_norm2_red = rm.BatchNormalize(mode='feature')
        self.conv2_1 = rm.Conv2d(224, filter=3, padding=1)
        self.batch_norm2_1 = rm.BatchNormalize(mode='feature')
        self.conv2_2 = rm.Conv2d(256, filter=3, stride=2)
        self.batch_norm2_2 = rm.BatchNormalize(mode='feature')
# Example #11
    def __init__(self, num_class):
        """VGG16-backed FCN-32s: five conv blocks, convolutionalized fc6/fc7,
        a per-class 1x1 score layer, and a single x32 upsampling deconv.

        Args:
            num_class: number of segmentation classes.
        """
        self.block1 = layer_factory(channel=64, conv_layer_num=2)
        self.block2 = layer_factory(channel=128, conv_layer_num=2)
        self.block3 = layer_factory(channel=256, conv_layer_num=3)
        self.block4 = layer_factory(channel=512, conv_layer_num=3)
        self.block5 = layer_factory(channel=512, conv_layer_num=3)

        # fc6/fc7 as convolutions; padding=3 keeps the 7x7 conv size-neutral.
        self.fc6 = rm.Conv2d(4096, filter=7, padding=3)
        self.fc7 = rm.Conv2d(4096, filter=1)

        self.score_fr = rm.Conv2d(num_class, filter=1)  # n_classes
        # One-shot x32 learned upsampling back toward input resolution.
        self.upscore = rm.Deconv2d(num_class, stride=32, padding=0,
                                   filter=32)  # n_classes
# Example #12
def transition_layer(growth_rate):
    """DenseNet transition: BN -> ReLU -> 1x1 conv -> 2x2 average pool.

    NOTE(review): the parameter sets the transition's output channel
    count; despite its name it is not the per-layer growth rate.
    """
    layers = [rm.BatchNormalize(epsilon=0.001, mode='feature')]
    layers.append(rm.Relu())
    layers.append(rm.Conv2d(growth_rate, filter=1, padding=0, stride=1))
    layers.append(rm.AveragePool2d(filter=2, stride=2))
    return rm.Sequential(layers)
# Example #13
 def __init__(self, num_classes, block, layers, cardinality):
     """Build a ResNeXt trunk.

     Args:
         num_classes: size of the final Dense output.
         block: residual block class passed through to ``_make_layer``.
         layers: four-element sequence with the block count per stage.
         cardinality: group count forwarded to every stage.
     """
     self.inplanes = 128
     self.cardinality = cardinality
     super(ResNeXt, self).__init__()
     # Stem: 7x7/2 conv + BN + ReLU + 3x3/2 max-pool.
     self.conv1 = rm.Conv2d(64,
                            filter=7,
                            stride=2,
                            padding=3,
                            ignore_bias=True)
     self.bn1 = rm.BatchNormalize(epsilon=0.00001, mode='feature')
     self.relu = rm.Relu()
     self.maxpool = rm.MaxPool2d(filter=3, stride=2, padding=1)
     # Four stages; width doubles per stage, stages 2-4 downsample by 2.
     self.layer1 = self._make_layer(block,
                                    128,
                                    layers[0],
                                    stride=1,
                                    cardinality=self.cardinality)
     self.layer2 = self._make_layer(block,
                                    256,
                                    layers[1],
                                    stride=2,
                                    cardinality=self.cardinality)
     self.layer3 = self._make_layer(block,
                                    512,
                                    layers[2],
                                    stride=2,
                                    cardinality=self.cardinality)
     self.layer4 = self._make_layer(block,
                                    1024,
                                    layers[3],
                                    stride=2,
                                    cardinality=self.cardinality)
     self.flat = rm.Flatten()
     self.fc = rm.Dense(num_classes)
# Example #14
def conv3x3(out_planes, stride=1):
    """Return a bias-free 3x3 convolution with unit padding."""
    kwargs = dict(filter=3, stride=stride, padding=1, ignore_bias=True)
    return rm.Conv2d(out_planes, **kwargs)
# Example #15
def layer_factory_deconv(channel_list=(512, 256)):
    """Build Conv(3x3)+ReLU followed by a stride-2 Deconv(3x3)+ReLU pair.

    Args:
        channel_list: (conv_channels, deconv_channels) pair.  The default
            is now an immutable tuple (was a shared mutable list).

    The deconvolution must support ``ceil_mode``; when the installed
    ``rm.Deconv2d`` does not accept that argument, fall back to the
    module-local ``Deconv2d`` implementation.
    """
    layers = [
        rm.Conv2d(channel=channel_list[0],
                  padding=1,
                  filter=3,
                  initializer=GlorotUniform()),
        rm.Relu(),
    ]
    # Pick the deconv class once instead of duplicating the whole call.
    if 'ceil_mode' in inspect.signature(rm.Deconv2d).parameters:
        deconv_cls = rm.Deconv2d
    else:
        deconv_cls = Deconv2d  # project-local fallback with ceil_mode
    layers.append(
        deconv_cls(channel=channel_list[1],
                   padding=1,
                   filter=3,
                   stride=2,
                   initializer=GlorotUniform(),
                   ceil_mode=True))
    layers.append(rm.Relu())
    return rm.Sequential(layers)
# Example #16
def test_gpu_node_convolution2d(a):
    """Check that Conv2d forward and backward agree between GPU and CPU.

    Runs the same graph once under ``use_cuda()`` and once on CPU, then
    compares outputs and the w/b/input gradients.
    """
    with use_cuda():

        layer = rm.Conv2d(channel=32)
        layer.params["w"] = rm.Variable(np.random.rand(32, 3, 3, 3))
        layer.params["b"] = rm.Variable(np.random.rand(1, 32, 1, 1))

        g1 = Variable(a)
        g2 = layer(g1)
        g3 = rm.sum(g2)
        g = g3.grad()
        g_g1 = g.get(layer.params["w"])
        g_g2 = g.get(layer.params["b"])
        g_g3 = g.get(g1)
        g2.to_cpu()
        g3.to_cpu()

    # Recompute the identical graph on CPU.
    c2 = layer(g1)
    c3 = rm.sum(c2)
    c = c3.grad()
    c_g1 = c.get(layer.params["w"])
    c_g2 = c.get(layer.params["b"])
    # BUG FIX: previously read ``g.get(g1)`` (the GPU gradient again),
    # which made the final comparison trivially true; fetch the CPU
    # input gradient so it is actually compared against the GPU one.
    c_g3 = c.get(g1)

    close(g2, c2)
    close(g3, c3)
    close(c_g1, g_g1)
    close(c_g2, g_g2)
    close(c_g3, g_g3)
# Example #17
def layer_factory(channel=32, conv_layer_num=2):
    """Stack ``conv_layer_num`` (3x3 conv + ReLU) pairs plus a 2x2 max-pool."""
    layers = []
    for _ in range(conv_layer_num):
        layers.extend([
            rm.Conv2d(channel=channel, padding=1, filter=3),
            rm.Relu(),
        ])
    layers.append(rm.MaxPool2d(filter=2, stride=2))
    return rm.Sequential(layers)
# Example #18
def test_conv2d(tmpdir):
    """Export a single Conv2d through ONNX and validate the emitted graph.

    Checks the node's op type and attributes, the input/weight/bias
    tensors against the live layer parameters, and the output shape.
    """
    model = rm.Sequential([
        rm.Conv2d(channel=32, filter=3, padding=1),
    ])

    input = renom.Variable(np.random.random((10, 10, 10, 10)))
    m = _run_onnx(tmpdir, model, input)

    assert m.graph.node[0].op_type == 'Conv'

    # Attribute list keyed by name for readable assertions.
    attrs = dict((a.name, a) for a in m.graph.node[0].attribute)
    assert attrs['pads'].ints == [1, 1]
    assert attrs['dilations'].ints == [1, 1]
    assert attrs['kernel_shape'].ints == [3, 3]
    assert attrs['strides'].ints == [1, 1]

    id_input, w, b = m.graph.node[0].input

    # check input
    assert 'input' == id_input
    assert get_shape(m.graph.input[0]) == input.shape

    conv2d = model._layers[0]
    inis = load_initializer(m.graph.initializer)

    # check w
    assert get_shape(m.graph.input[1]) == conv2d.params.w.shape
    _test_initializer(inis, w, conv2d.params.w)

    # check b
    # ONNX stores the conv bias flattened to a single dimension.
    assert get_shape(m.graph.input[2]) == (np.prod(conv2d.params.b.shape), )
    _test_initializer(inis, b, conv2d.params.b)

    # check output
    assert get_shape(m.graph.output[0]) == (10, 32, 10, 10)
# Example #19
    def __init__(self,
                 class_map=None,
                 cells=7,
                 bbox=2,
                 imsize=(224, 224),
                 load_pretrained_weight=False,
                 train_whole_network=False):
        """Set up YOLOv1: a frozen Darknet feature extractor plus a
        trainable detection head.

        Args:
            class_map: class-name mapping forwarded to the base class.
            cells: detection grid size; a scalar becomes (cells, cells).
            bbox: bounding boxes predicted per grid cell.
            imsize: input image size forwarded to the base class.
            load_pretrained_weight: forwarded to the base class.
            train_whole_network: forwarded to the base class.
        """

        # Accept a scalar grid size by promoting it to a (rows, cols) pair.
        if not hasattr(cells, "__getitem__"):
            cells = (cells, cells)

        self._cells = cells
        self._bbox = bbox
        model = Darknet()
        super(Yolov1, self).__init__(class_map, imsize, load_pretrained_weight,
                                     train_whole_network, model)

        # Per cell: class scores plus (x, y, w, h, confidence) per box.
        self._last_dense_size = (self.num_class +
                                 5 * bbox) * cells[0] * cells[1]
        # Everything but Darknet's last four layers stays frozen.
        self._freezed_network = rm.Sequential(model[:-4])
        self._network = rm.Sequential([
            rm.Conv2d(channel=1024, filter=3, padding=1, ignore_bias=True),
            rm.BatchNormalize(mode='feature'),
            rm.LeakyRelu(slope=0.1),
            rm.Conv2d(channel=1024,
                      filter=3,
                      padding=1,
                      stride=2,
                      ignore_bias=True),
            rm.BatchNormalize(mode='feature'),
            rm.LeakyRelu(slope=0.1),
            rm.Conv2d(channel=1024, filter=3, padding=1, ignore_bias=True),
            rm.BatchNormalize(mode='feature'),
            rm.LeakyRelu(slope=0.1),
            rm.Conv2d(channel=1024, filter=3, padding=1, ignore_bias=True),
            rm.BatchNormalize(mode='feature'),
            rm.LeakyRelu(slope=0.1),
            rm.Flatten(),
            rm.Dense(
                4096
            ),  # instead of locally connected layer, we are using Dense layer
            rm.LeakyRelu(slope=0.1),
            rm.Dropout(0.5),
            rm.Dense(self._last_dense_size)
        ])
        self._opt = rm.Sgd(0.0005, 0.9)
# Example #20
 def __init__(self, inplanes, planes, stride=1, downsample=None):
     """ResNet bottleneck: 1x1 reduce -> 3x3 (optionally strided) -> 1x1
     expand to ``planes * self.expansion``, each conv followed by BN.

     Args:
         inplanes: input channel count (not used in this body; presumably
             kept for signature parity with the reference implementation).
         planes: base width of the bottleneck.
         stride: stride of the middle 3x3 convolution.
         downsample: optional module applied to the identity branch.
     """
     super(Bottleneck, self).__init__()
     self.conv1 = rm.Conv2d(planes, filter=1, ignore_bias=True)
     self.bn1 = rm.BatchNormalize(mode='feature')
     self.conv2 = rm.Conv2d(planes,
                            filter=3,
                            stride=stride,
                            padding=1,
                            ignore_bias=True)
     self.bn2 = rm.BatchNormalize(mode='feature')
     self.conv3 = rm.Conv2d(planes * self.expansion,
                            filter=1,
                            ignore_bias=True)
     self.bn3 = rm.BatchNormalize(mode='feature')
     self.relu = rm.Relu()
     self.downsample = downsample
     self.stride = stride
# Example #21
 def __init__(self, channel, filter=3, prev_ch=None):
     """'Same'-padded convolution paired with feature-mode BatchNorm.

     When ``prev_ch`` is supplied, the conv parameters are materialized
     immediately (trainable weights, frozen zero bias) rather than being
     initialized lazily on first call.
     """
     pad = int((filter - 1) / 2)  # 'same' padding for odd filter sizes
     self._conv = rm.Conv2d(channel=channel, filter=filter, padding=pad)
     self._bn = rm.BatchNormalize(mode='feature', momentum=0.99)
     if prev_ch is not None:
         # Pre-allocate: weights updated by the optimizer, bias fixed at 0.
         self._conv.params = {
             "w":
             rm.Variable(self._conv._initializer(
                 (channel, prev_ch, filter, filter)),
                         auto_update=True),
             "b":
             rm.Variable(np.zeros((1, channel, 1, 1), dtype=np.float32),
                         auto_update=False),
         }
# Example #22
def layer_factory(channel_list=(64,)):
    """Stack a (3x3 conv + ReLU) pair per entry of ``channel_list``.

    Args:
        channel_list: output channel count for each conv layer.  The
            default is now an immutable tuple (was a shared mutable list).
    """
    layers = []
    # Iterate values directly instead of the range(len(...)) anti-pattern.
    for ch in channel_list:
        layers.append(
            rm.Conv2d(channel=ch,
                      padding=1,
                      filter=3,
                      initializer=GlorotUniform()))
        layers.append(rm.Relu())
    return rm.Sequential(layers)
# Example #23
 def __init__(self, planes, stride=1, downsample=None, cardinality=32):
     """ResNeXt bottleneck: 1x1 reduce -> grouped 3x3 -> 1x1 expand.

     Args:
         planes: base width of the bottleneck.
         stride: stride of the grouped 3x3 convolution.
         downsample: optional module applied to the identity branch.
         cardinality: number of groups in the 3x3 GroupConv2d.
     """
     super(Bottleneck, self).__init__()
     self.cardinality = cardinality
     self.conv1 = rm.Conv2d(planes, filter=1, ignore_bias=True)
     self.bn1 = rm.BatchNormalize(epsilon=0.00001, mode='feature')
     # The grouped convolution is what distinguishes ResNeXt from ResNet.
     self.conv2 = rm.GroupConv2d(planes,
                                 filter=3,
                                 stride=stride,
                                 padding=1,
                                 ignore_bias=True,
                                 groups=self.cardinality)
     self.bn2 = rm.BatchNormalize(epsilon=0.00001, mode='feature')
     self.conv3 = rm.Conv2d(planes * self.expansion,
                            filter=1,
                            ignore_bias=True)
     self.bn3 = rm.BatchNormalize(epsilon=0.00001, mode='feature')
     self.relu = rm.Relu()
     self.downsample = downsample
     self.stride = stride
 def denseblock(self, dim=8, input_channels=10, dropout=False):
     """Assemble a dense block plus its 1x1 compression convolution.

     Returns a (layer_list, output_channels) pair.  NOTE(review): the
     ``dropout`` argument is ignored -- the log line keys off
     ``self.dropout`` and no Dropout layer is appended; confirm intent.
     ``dim`` appears only in the log messages.
     """
     layers = []
     channels = input_channels
     print('-> {}'.format(channels))
     # Each iteration adds BN + 3x3 conv and grows the channel count.
     for _ in range(self.depth):
         channels += self.growth_rate
         print('Batch Normalize')
         layers.append(rm.BatchNormalize())
         print(' Conv2d > {}x{} {}ch'.format(dim, dim, self.growth_rate))
         layers.append(
             rm.Conv2d(self.growth_rate, filter=3, padding=(1, 1)))
         if self.dropout:
             print('Dropout')
     # Compress channels by the configured ratio via a 1x1 conv.
     channels = int(channels * self.compression)
     print('*Conv2d > {}x{} {}ch'.format(dim, dim, channels))
     layers.append(rm.Conv2d(channels, filter=1))
     print(' Average Pooling')
     print('<- {}'.format(channels))
     return layers, channels
# Example #25
 def __init__(self, num_class=1000):
     """Darknet19 classifier: shared base plus a 1x1 scoring convolution.

     The final conv's parameters are materialized eagerly: weights are
     trainable (auto_update=True) while the bias is frozen.  The weight
     shape assumes the base network emits 1024 channels.
     """
     self._num_class = num_class
     self._base = Darknet19Base()
     self._last = rm.Conv2d(num_class, filter=1)
     self._last.params = {
         "w":
         rm.Variable(self._last._initializer((num_class, 1024, 1, 1)),
                     auto_update=True),
         "b":
         rm.Variable(self._last._initializer((1, num_class, 1, 1)),
                     auto_update=False),
     }
# Example #26
 def __init__(self, channels=[64, 96, 128, 16, 32]):
     """GoogLeNet-style inception block branches.

     NOTE(review): only channels[0..2] are referenced even though the
     default list has five entries; conv3_reduced/conv3/conv4 reusing
     channels[1] and channels[2] (rather than channels[3]/channels[4])
     looks like a copy-paste slip -- confirm against the reference
     architecture.  The mutable list default is shared across calls but
     never mutated here.
     """
     self.conv1 = rm.Conv2d(channels[0], filter=1)
     self.conv2_reduced = rm.Conv2d(channels[1], filter=1)
     self.conv2 = rm.Conv2d(channels[2], filter=3, padding=1)
     self.conv3_reduced = rm.Conv2d(channels[1], filter=1)
     self.conv3 = rm.Conv2d(channels[2], filter=5, padding=2)
     self.conv4 = rm.Conv2d(channels[1], filter=1)
 def __init__(self,
              input_shape=(28, 28),
              blocks=2,
              depth=3,
              growth_rate=12,
              latent_dim=10,
              dropout=False,
              intermidiate_dim=128,
              compression=0.5,
              initial_channel=8):
     """Build a DenseNet-style VAE encoder.

     A stride-2 input conv halves the image, ``blocks`` dense blocks
     (built by ``self.denseblock``) follow, then a pyramid of Dense
     layers shrinks the flattened features to ``intermidiate_dim`` and
     finally to the latent mean and log-variance heads.

     Args:
         input_shape: (H, W) of the input; only H is used, so a square
             input is assumed -- TODO confirm.
         blocks: number of dense blocks.
         depth: layers per dense block (read by ``denseblock``).
         growth_rate: channel growth per dense-block layer.
         latent_dim: size of the latent mean/log-var outputs.
         dropout: stored on self for ``denseblock`` to consult.
         intermidiate_dim: width of the last plain Dense layer.
         compression: dense-block channel compression factor.
         initial_channel: channels produced by the input convolution.
     """
     self.depth = depth
     self.input_shape = input_shape
     self.latent_dim = latent_dim
     self.dropout = dropout
     self.intermidiate_dim = intermidiate_dim
     self.compression = compression
     self.growth_rate = growth_rate
     self.blocks = blocks
     print('--- Ecoding Network ---')
     parameters = []
     channels = initial_channel
     dim = self.input_shape[0]
     print('Input image {}x{}'.format(dim, dim))
     dim = dim // 2
     print(' Conv2d > {}x{} {}ch'.format(dim, dim, channels))
     self.input = rm.Conv2d(channels, filter=5, padding=2, stride=2)
     for _ in range(blocks):
         t_params, channels = self.denseblock(
             dim=dim,
             input_channels=channels,
         )
         parameters += t_params
         # Each dense block ends in pooling that halves (ceil) the map.
         dim = (dim + 1) // 2
     self.hidden = rm.Sequential(parameters)
     nb_parameters = dim * dim * channels
     print(' Flatten {} params'.format(nb_parameters))
     parameters = []
     # Shrink by ~4x per Dense layer until close to intermidiate_dim.
     fcnn_depth = int(
         (np.log(nb_parameters / intermidiate_dim)) / np.log(4))
     nb_parameters = nb_parameters // 4
     for _ in range(fcnn_depth):
         print(' Dense {}u'.format(nb_parameters))
         parameters.append(rm.Dense(nb_parameters))
         nb_parameters = nb_parameters // 4
     print(' Dense {}u'.format(intermidiate_dim))
     parameters.append(rm.Dense(intermidiate_dim))
     # Two heads share the fcnn Sequential: latent mean, then log-variance.
     print('*Mean Dense {}u'.format(latent_dim))
     parameters.append(rm.Dense(latent_dim, initializer=Uniform()))
     print('*Log Var Dense {}u'.format(latent_dim))
     parameters.append(rm.Dense(latent_dim, initializer=Gaussian(std=0.3)))
     self.fcnn = rm.Sequential(parameters)
# Example #28
def exp_convolution1():
    """Benchmark two stacked Conv2d layers on GPU for 100 SGD steps.

    The inline comments record observed loss divergence between GPU and
    CPU, attributed to the non-deterministic CUDNN GEMM forward algorithm.
    Prints the loss each step and the total elapsed time.
    """
    np.random.seed(10)
    # Caused by CUDNN_CONVOLUTION_FWD_ALGO_GEMM is not deterministic.
    # 1724.07080078 GPU
    # 1715.86767578 CPU
    cuda.set_cuda_active(True)
    a = np.random.randn(8 * 2, 64, 32, 32).astype(np.float32)
    b = np.random.randn(8 * 2, 32, 28, 28).astype(np.float32)
    # input_size fixes the layer shapes up front (32x32 -> 30x30 -> 28x28).
    layer1 = rm.Conv2d(channel=32, input_size=a.shape[1:])
    layer2 = rm.Conv2d(channel=32, input_size=(32, 30, 30))

    # Inputs/targets are excluded from gradient updates.
    ga = rm.Variable(a, auto_update=False)
    gb = rm.Variable(b, auto_update=False)

    opt = Sgd(0.0001, momentum=0.0)
    start_t = time.time()
    for _ in range(100):
        loss = rm.Sum((layer2(rm.Relu(layer1(ga))) - gb)**2) / 8
        loss.ensure_cpu()
        grad = loss.grad()
        grad.update(opt)
        print(loss)
    print(time.time() - start_t)
# Example #29
    def __init__(self, num_class):
        """Assemble GoogLeNet (Inception-v1): stem plus inception stacks,
        two auxiliary classifiers, and the main head.

        Args:
            num_class: number of output classes for all three heads.
        """
        # Stem (7x7/2 conv, pools, 1x1+3x3 convs) and the first inception
        # stages; explicit channel lists override the block defaults.
        self.base1 = rm.Sequential([rm.Conv2d(64, filter=7, padding=3, stride=2),
                                    rm.Relu(),
                                    rm.MaxPool2d(filter=3, stride=2, padding=1),
                                    rm.BatchNormalize(mode='feature'),
                                    rm.Conv2d(64, filter=1, stride=1),
                                    rm.Relu(),
                                    rm.Conv2d(192, filter=3, padding=1, stride=1),
                                    rm.Relu(),
                                    rm.BatchNormalize(mode='feature'),
                                    rm.MaxPool2d(filter=3, stride=2, padding=1),
                                    InceptionV1Block(),
                                    InceptionV1Block([128, 128, 192, 32, 96, 64]),
                                    rm.MaxPool2d(filter=3, stride=2),
                                    InceptionV1Block([192, 96, 208, 16, 48, 64]),
                                    ])

        # First auxiliary classifier, branching off base1.
        self.aux1 = rm.Sequential([rm.AveragePool2d(filter=5, stride=3),
                                   rm.Flatten(),
                                   rm.Dense(1024),
                                   rm.Dense(num_class)])

        self.base2 = rm.Sequential([InceptionV1Block([160, 112, 224, 24, 64, 64]),
                                    InceptionV1Block([128, 128, 256, 24, 64, 64]),
                                    InceptionV1Block([112, 144, 288, 32, 64, 64])])

        # Second auxiliary classifier, branching off base2.
        self.aux2 = rm.Sequential([rm.AveragePool2d(filter=5, stride=3),
                                   rm.Flatten(),
                                   rm.Dense(1024),
                                   rm.Dense(num_class)])

        # Final inception stages, global average pooling, and flatten.
        self.base3 = rm.Sequential([InceptionV1Block([256, 160, 320, 32, 128, 128]),
                                    InceptionV1Block([256, 160, 320, 32, 128, 128]),
                                    InceptionV1Block([192, 384, 320, 48, 128, 128]),
                                    rm.AveragePool2d(filter=7, stride=1),
                                    rm.Flatten()])
        # Main classifier head.
        self.aux3 = rm.Dense(num_class)
# Example #30
    def __init__(self, num_class):
        """VGG16-backed FCN-8s: conv blocks, convolutionalized fc6/fc7,
        score layers for the pool3/pool4 skips, and x2/x2/x8 upsamplers.

        Args:
            num_class: number of segmentation classes.
        """

        self.block1 = layer_factory(channel=64, conv_layer_num=2)
        self.block2 = layer_factory(channel=128, conv_layer_num=2)
        self.block3 = layer_factory(channel=256, conv_layer_num=3)
        self.block4 = layer_factory(channel=512, conv_layer_num=3)
        self.block5 = layer_factory(channel=512, conv_layer_num=3)

        # fc6/fc7 as convolutions; padding=3 keeps the 7x7 size-neutral.
        self.fc6 = rm.Conv2d(4096, filter=7, padding=3)
        self.dr1 = rm.Dropout(0.5)
        self.fc7 = rm.Conv2d(4096, filter=1)
        self.dr2 = rm.Dropout(0.5)

        # Per-class scores and learned upsampling stages.
        self.score_fr = rm.Conv2d(num_class, filter=1)
        self.upscore2 = rm.Deconv2d(num_class, filter=2, stride=2, padding=0)
        self.upscore8 = rm.Deconv2d(num_class, filter=8, stride=8, padding=0)

        # 1x1 score layers for the pool3/pool4 skip connections.
        self.score_pool3 = rm.Conv2d(num_class, filter=1)
        self.score_pool4 = rm.Conv2d(num_class, filter=1)

        # Upsamples the fused pool4 scores before the pool3 fusion.
        self.upscore_pool4 = rm.Deconv2d(num_class,
                                         filter=2,
                                         stride=2,
                                         padding=0)