Example #1
    def __init__(self, num_class):
        init_deconv = DeconvInitializer()  # deconvolution weight initializer defined elsewhere in the source
        self.block1 = layer_factory(channel=64, conv_layer_num=2, first=True)
        self.block2 = layer_factory(channel=128, conv_layer_num=2)
        self.block3 = layer_factory(channel=256, conv_layer_num=3)
        self.block4 = layer_factory(channel=512, conv_layer_num=3)
        self.block5 = layer_factory(channel=512, conv_layer_num=3)

        self.fc6 = rm.Conv2d(4096, filter=7, stride=1, padding=0)
        self.dr1 = rm.Dropout(dropout_ratio=0.5)
        self.fc7 = rm.Conv2d(4096, filter=1, stride=1, padding=0)
        self.dr2 = rm.Dropout(dropout_ratio=0.5)

        self.score_fr = rm.Conv2d(num_class, filter=1, stride=1,
                                  padding=0)  # n_classes
        self.score_pool4 = rm.Conv2d(num_class, filter=1, padding=0)

        self.upscore2 = rm.Deconv2d(num_class,
                                    filter=4,
                                    stride=2,
                                    padding=0,
                                    ignore_bias=True,
                                    initializer=init_deconv)  # n_classes
        self.upscore16 = rm.Deconv2d(num_class,
                                     filter=32,
                                     stride=16,
                                     padding=0,
                                     ignore_bias=True,
                                     initializer=init_deconv)  # n_classes
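
A quick shape check clarifies the two upsampling layers. For a transposed convolution with padding=0, the output size is (in - 1) * stride + filter, so upscore2 slightly more than doubles the map and upscore16 slightly more than 16x-upsamples it; FCN-style networks crop the excess afterwards. A minimal sketch (the helper name and the input size 16 are illustrative, not from the example):

def deconv_out_size(in_size, filter, stride, padding=0):
    # standard transposed-convolution output-size formula
    return (in_size - 1) * stride - 2 * padding + filter

print(deconv_out_size(16, filter=4, stride=2))    # 34, i.e. 2*16 + 2
print(deconv_out_size(34, filter=32, stride=16))  # 560, i.e. 16*34 + 16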
Example #2
    def __init__(self, num_class=1):
        self.conv1_1 = rm.Conv2d(64, padding=1, filter=3)
        self.bn1_1 = rm.BatchNormalize(mode='feature')
        self.conv1_2 = rm.Conv2d(64, padding=1, filter=3)
        self.bn1_2 = rm.BatchNormalize(mode='feature')
        self.conv2_1 = rm.Conv2d(128, padding=1, filter=3)
        self.bn2_1 = rm.BatchNormalize(mode='feature')
        self.conv2_2 = rm.Conv2d(128, padding=1, filter=3)
        self.bn2_2 = rm.BatchNormalize(mode='feature')
        self.conv3_1 = rm.Conv2d(256, padding=1, filter=3)
        self.bn3_1 = rm.BatchNormalize(mode='feature')
        self.conv3_2 = rm.Conv2d(256, padding=1, filter=3)
        self.bn3_2 = rm.BatchNormalize(mode='feature')
        self.conv4_1 = rm.Conv2d(512, padding=1, filter=3)
        self.bn4_1 = rm.BatchNormalize(mode='feature')
        self.conv4_2 = rm.Conv2d(512, padding=1, filter=3)
        self.bn4_2 = rm.BatchNormalize(mode='feature')
        self.conv5_1 = rm.Conv2d(1024, padding=1, filter=3)
        self.bn5_1 = rm.BatchNormalize(mode='feature')
        self.conv5_2 = rm.Conv2d(1024, padding=1, filter=3)
        self.bn5_2 = rm.BatchNormalize(mode='feature')

        self.deconv1 = rm.Deconv2d(512, stride=2)
        self.conv6_1 = rm.Conv2d(256, padding=1)
        self.conv6_2 = rm.Conv2d(256, padding=1)
        self.deconv2 = rm.Deconv2d(256, stride=2)
        self.conv7_1 = rm.Conv2d(128, padding=1)
        self.conv7_2 = rm.Conv2d(128, padding=1)
        self.deconv3 = rm.Deconv2d(128, stride=2)
        self.conv8_1 = rm.Conv2d(64, padding=1)
        self.conv8_2 = rm.Conv2d(64, padding=1)
        self.deconv4 = rm.Deconv2d(64, stride=2)
        self.conv9 = rm.Conv2d(num_class, filter=1)
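
These layers form a U-Net-style encoder/decoder, but the snippet omits the forward pass. A hypothetical sketch of one encoder/decoder step, assuming ReNom's functional rm.max_pool2d and rm.concat behave as in its documentation (the names t1 and x are illustrative):

    def forward(self, x):
        t1 = rm.relu(self.bn1_1(self.conv1_1(x)))
        t1 = rm.relu(self.bn1_2(self.conv1_2(t1)))
        x = rm.max_pool2d(t1, filter=2, stride=2)   # downsample
        # ... blocks 2-5 and deconv1-3 elided ...
        x = rm.relu(self.deconv4(x))                # upsample back to t1's size
        x = rm.concat(t1, x)                        # skip connection
        return self.conv9(x)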
Example #3
def test_gpu_node_deconvolution2d(a):
    with use_cuda():

        layer = rm.Deconv2d(channel=32)
        layer.params["w"] = rm.Variable(np.random.rand(3, 32, 3, 3))
        layer.params["b"] = rm.Variable(np.random.rand(1, 32, 1, 1))

        g1 = Variable(a)
        g2 = layer(g1)
        g3 = rm.sum(g2)
        g = g3.grad()
        g_g1 = g.get(layer.params["w"])
        g_g2 = g.get(layer.params["b"])
        g_g3 = g.get(g1)
        g2.to_cpu()
        g3.to_cpu()

    c2 = layer(g1)
    c3 = rm.sum(c2)
    c = c3.grad()
    c_g1 = c.get(layer.params["w"])
    c_g2 = c.get(layer.params["b"])
    c_g3 = c.get(g1)

    close(g2, c2)
    close(g3, c3)
    close(c_g1, g_g1)
    close(c_g2, g_g2)
    close(c_g3, g_g3)
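
The close helper used above is not shown in the snippet; a plausible minimal version (an assumption, not ReNom test code) wraps numpy.allclose with a loose tolerance to absorb GPU/CPU floating-point differences:

import numpy as np

def close(a, b, atol=1e-5, rtol=1e-3):
    # element-wise comparison of two arrays / ReNom nodes within tolerance
    assert np.allclose(np.array(a), np.array(b), atol=atol, rtol=rtol)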
Example #4
def layer_factory_deconv(channel_list=(512, 256)):
    # builds a Conv2d + stride-2 Deconv2d upsampling block
    layers = []
    layers.append(
        rm.Conv2d(channel=channel_list[0],
                  padding=1,
                  filter=3,
                  initializer=GlorotUniform()))
    layers.append(rm.Relu())
    # newer ReNom versions expose ceil_mode on rm.Deconv2d (needs "import inspect" at module level)
    if 'ceil_mode' in inspect.signature(rm.Deconv2d).parameters:
        layers.append(
            rm.Deconv2d(channel=channel_list[1],
                        padding=1,
                        filter=3,
                        stride=2,
                        initializer=GlorotUniform(),
                        ceil_mode=True))
    else:
        # fall back to a locally defined Deconv2d that accepts ceil_mode
        layers.append(
            Deconv2d(channel=channel_list[1],
                     padding=1,
                     filter=3,
                     stride=2,
                     initializer=GlorotUniform(),
                     ceil_mode=True))
    layers.append(rm.Relu())
    return rm.Sequential(layers)
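
A hypothetical call site for the factory above (shapes are illustrative): the block keeps the channel count at channel_list[0] through the convolution, then the stride-2 deconvolution roughly doubles the spatial size and reduces channels to channel_list[1]:

block = layer_factory_deconv(channel_list=(512, 256))
# x: (N, 512, H, W) feature map  ->  block(x): (N, 256, ~2H, ~2W)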
Example #5
    def __init__(
            self,
            latent_dim=10,
            output_shape=(28, 28),
            batch_normal=False,
            dropout=False,
            min_channels=16,
    ):
        self.batch_normal = batch_normal
        self.latent_dim = latent_dim
        self.output_shape = output_shape
        self.dropout = dropout
        self.min_channels = min_channels
        print('--- Generator Network ---')
        parameters = []
        print_params = []
        dim = output_shape[0]
        channels = self.min_channels
        # halve the spatial size while it stays even, doubling channels each step
        while dim % 2 == 0 and dim > 2:
            parameters.append(rm.Deconv2d(
                channel=channels, stride=2, filter=2))
            if batch_normal:
                parameters.append(rm.BatchNormalize())
            dim = dim // 2
            print_params.append([dim, channels])
            channels *= 2
        # one extra step with filter=3 handles an odd intermediate size
        if dim % 2 == 1:
            parameters.append(rm.Deconv2d(
                channel=channels, stride=2, filter=3))
            if batch_normal:
                parameters.append(rm.BatchNormalize())
            dim = (dim - 1) // 2
            print_params.append([dim, channels])
            channels *= 2
        # layers were built top-down; reverse them so the model upsamples
        parameters.reverse()
        print_params.reverse()
        print('Dense {}x{}x{} & Reshape'.format(dim, dim, channels))
        self.channels = channels
        self.transform = rm.Dense(channels * dim * dim)
        for item in print_params:
            print('Deconv2d to {}x{} {}ch '.format(
                item[0], item[0], item[1]))
        self.hidden = rm.Sequential(parameters)
        self.output = rm.Conv2d(channel=1, stride=1, filter=1)
        print('Conv2d to {}x{} 1ch'.format(
            output_shape[0], output_shape[0]))
        self.dim = dim
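
Tracing the constructor for the default output_shape=(28, 28) and min_channels=16 is a useful sanity check (worked arithmetic, not part of the source): the even branch maps 28 -> 14 -> 7, the odd branch maps 7 -> 3, so transform is Dense(128 * 3 * 3), and after the reverse the stacked deconvolutions rebuild the size via (in - 1) * stride + filter:

assert (3 - 1) * 2 + 3 == 7     # odd-size step, filter=3
assert (7 - 1) * 2 + 2 == 14    # even-size steps, filter=2
assert (14 - 1) * 2 + 2 == 28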
Example #6
    def __init__(self, num_class):
        self.block1 = layer_factory(channel=64, conv_layer_num=2)
        self.block2 = layer_factory(channel=128, conv_layer_num=2)
        self.block3 = layer_factory(channel=256, conv_layer_num=3)
        self.block4 = layer_factory(channel=512, conv_layer_num=3)
        self.block5 = layer_factory(channel=512, conv_layer_num=3)

        self.fc6 = rm.Conv2d(4096, filter=7, padding=3)
        self.fc7 = rm.Conv2d(4096, filter=1)

        self.score_fr = rm.Conv2d(num_class, filter=1)
        self.score_pool4 = rm.Conv2d(num_class, filter=1)

        self.upscore2 = rm.Deconv2d(num_class, filter=2, stride=2, padding=0)
        self.upscore16 = rm.Deconv2d(num_class,
                                     filter=16,
                                     stride=16,
                                     padding=0)
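
Unlike Example #1, both deconvolutions here set filter equal to stride with padding=0, so the output size is (in - 1) * stride + filter = in * stride: the upsampling is an exact 2x and 16x with no kernel overlap, and no cropping step is needed afterwards.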
Example #7
def test_gpu_node_upconvolution2d(a):
    with use_cuda():

        layer = rm.Deconv2d(channel=32)

        g1 = Variable(a)
        g3 = rm.sum(layer(g1))
        g = g3.grad()
        g_g1 = g.get(layer.params["w"])
        g_g2 = g.get(layer.params["b"])
        g_g3 = g.get(g1)
        g3.to_cpu()

    c3 = rm.sum(layer(g1))
    c = c3.grad()
    c_g1 = c.get(layer.params["w"])
    c_g2 = c.get(layer.params["b"])
    c_g3 = c.get(g1)

    close(g3, c3)
    close(c_g1, g_g1)
    close(c_g2, g_g2)
    close(c_g3, g_g3)
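
Note the difference from Example #3: here the weights are not assigned by hand, so layer.params["w"] and layer.params["b"] are created lazily by the first forward call inside the use_cuda() block, and those same parameters are then reused for the CPU pass being compared against.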
Example #8
    def __init__(
            self,
            input_params=32768,
            first_shape=(1, 32, 32, 32),
            output_shape=(1, 1, 64, 64),
            check_network=False,
            batchnormal=True,
            dropout=False,
            down_factor=1.6,
            act=rm.Relu(),
            last_act=rm.Sigmoid(),
    ):
        self.input_params = input_params
        self.latent_dim = input_params
        self.first_shape = first_shape
        self.output_shape = output_shape
        self.act = act
        self.last_act = last_act
        self.down_factor = down_factor

        def decide_factor(src, dst):
            # number of stride-2 upsamplings needed to grow dst up to src
            factor = np.log(src / dst) / np.log(2)
            if factor % 1 == 0:
                return factor
            return np.ceil(factor)

        ch = first_shape[1]
        v_factor = decide_factor(output_shape[2], first_shape[2])
        h_factor = decide_factor(output_shape[3], first_shape[3])
        v_dim, h_dim = first_shape[2], first_shape[3]
        parameters = []
        check_params = np.array(first_shape[1:]).prod()
        self.trans = False
        if input_params != check_params:
            if check_network:
                print('--- Decoder Network ---')
                print('inserting Dense({})'.format(check_params))
            self.trans = rm.Dense(check_params)
        while v_factor != 0 or h_factor != 0:
            if batchnormal:
                parameters.append(rm.BatchNormalize())
                if check_network:
                    print('BN ', end='')
            stride = (2 if v_factor > 0 else 1, 2 if h_factor > 0 else 1)
            if check_network:
                print('transpose2d ch={}, filter=2, stride={}'.format(
                    ch, stride))
            parameters.append(rm.Deconv2d(channel=ch, filter=2, stride=stride))
            if self.act:
                parameters.append(self.act)
            if ch > output_shape[1]:
                ch = int(np.ceil(ch / self.down_factor))
            v_dim = v_dim * 2 if v_factor > 0 else v_dim + 1
            h_dim = h_dim * 2 if h_factor > 0 else h_dim + 1
            v_factor = v_factor - 1 if v_factor > 0 else 0
            h_factor = h_factor - 1 if h_factor > 0 else 0
        if v_dim > output_shape[2] or h_dim > output_shape[3]:
            last_filter = (v_dim - output_shape[2] + 1,
                           h_dim - output_shape[3] + 1)
            if check_network:
                print('conv2d filter={}, stride=1'.format(last_filter))
            parameters.append(
                rm.Conv2d(channel=output_shape[1],
                          filter=last_filter,
                          stride=1))
        self.parameters = rm.Sequential(parameters)
        if check_network:
            self.forward(np.zeros((first_shape[0], input_params)),
                         print_parameter=True)
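
With the default shapes, a quick trace (illustrative, not from the source): decide_factor(64, 32) is exactly 1 on both axes and check_params equals input_params, so no Dense is inserted and the loop runs once, appending BatchNormalize -> Deconv2d(channel=32, filter=2, stride=2) -> Relu, which maps the 32x32 map to exactly (32 - 1) * 2 + 2 = 64x64; since the sizes already match, no trailing Conv2d is appended, and the final feature map keeps 32 channels in this case because that trailing Conv2d is the only layer mapping to output_shape[1].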