Example #1
 def __init__(self):
     super(ResidualBlock, self).__init__()
     self.conv1 = Conv2d(out_channels=64,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         act=None,
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first',
                         b_init=None)
     self.bn1 = BatchNorm2d(num_features=64,
                            act=tlx.ReLU,
                            gamma_init=G_init,
                            data_format='channels_first')
     self.conv2 = Conv2d(out_channels=64,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         act=None,
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first',
                         b_init=None)
     self.bn2 = BatchNorm2d(num_features=64,
                            act=None,
                            gamma_init=G_init,
                            data_format='channels_first')
Example #2
 def __init__(self):
     super(SRGAN_g, self).__init__()
     self.conv1 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(1,1), act=tlx.ReLU, padding='SAME', W_init=W_init)
     self.residual_block = self.make_layer()
     self.conv2 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(1,1), padding='SAME', W_init=W_init, b_init=None)
     self.bn1 = BatchNorm2d(num_features=64, act=None, gamma_init=G_init)
     self.conv3 = Conv2d(out_channels=256, kernel_size=(3,3), stride=(1,1), padding='SAME', W_init=W_init)
     self.subpiexlconv1 = SubpixelConv2d(scale=2, act=tlx.ReLU)
     self.conv4 = Conv2d(out_channels=256, kernel_size=(3,3), stride=(1,1), padding='SAME', W_init=W_init)
     self.subpiexlconv2 = SubpixelConv2d(scale=2, act=tlx.ReLU)
     self.conv5 = Conv2d(out_channels=3, kernel_size=(1,1), stride=(1,1), act=tlx.Tanh, padding='SAME', W_init=W_init)
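The helper `self.make_layer()` is referenced but not included in this excerpt. A hedged sketch of such a helper, assuming it stacks residual blocks like the one in Example #1 inside a Sequential container; the block count of 16 follows the usual SRGAN setup and is an assumption here:

 def make_layer(self, n_blocks=16):
     # Assumed helper: the excerpt does not show it; 16 residual blocks is the usual SRGAN setting.
     return Sequential([ResidualBlock() for _ in range(n_blocks)])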
Example #3
 def __init__(self):
     super(SRGAN_g2, self).__init__()
     self.conv1 = Conv2d(out_channels=64,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         act=None,
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first')
     self.residual_block = self.make_layer()
     self.conv2 = Conv2d(out_channels=64,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first',
                         b_init=None)
     self.bn1 = BatchNorm2d(act=None,
                            gamma_init=G_init,
                            data_format='channels_first')
     self.upsample1 = UpSampling2d(data_format='channels_first',
                                   scale=(2, 2),
                                   method='bilinear')
     self.conv3 = Conv2d(out_channels=64,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first',
                         b_init=None)
     self.bn2 = BatchNorm2d(act=tlx.ReLU,
                            gamma_init=G_init,
                            data_format='channels_first')
     self.upsample2 = UpSampling2d(data_format='channels_first',
                                   scale=(4, 4),
                                   method='bilinear')
     self.conv4 = Conv2d(out_channels=32,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first',
                         b_init=None)
     self.bn3 = BatchNorm2d(act=tlx.ReLU,
                            gamma_init=G_init,
                            data_format='channels_first')
     self.conv5 = Conv2d(out_channels=3,
                         kernel_size=(1, 1),
                         stride=(1, 1),
                         act=tlx.Tanh,
                         padding='SAME',
                         W_init=W_init)
Example #4
 def __init__(self):
     super(SRGAN_d2, self).__init__()
     self.conv1 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init)
     self.conv2 = Conv2d(out_channels=64, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
     self.bn1 = BatchNorm2d(gamma_init=G_init)
     self.conv3 = Conv2d(out_channels=128, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
     self.bn2 = BatchNorm2d(gamma_init=G_init)
     self.conv4 = Conv2d(out_channels=128, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
     self.bn3 = BatchNorm2d(gamma_init=G_init)
     self.conv5 = Conv2d(out_channels=256, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
     self.bn4 = BatchNorm2d(gamma_init=G_init)
     self.conv6 = Conv2d(out_channels=256, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
     self.bn5 = BatchNorm2d(gamma_init=G_init)
     self.conv7 = Conv2d(out_channels=512, kernel_size=(3,3), stride=(1,1), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
     self.bn6 = BatchNorm2d(gamma_init=G_init)
     self.conv8 = Conv2d(out_channels=512, kernel_size=(3,3), stride=(2,2), act=tlx.LeakyReLU(alpha=0.2), padding='SAME', W_init=W_init, b_init=None)
     self.bn7 = BatchNorm2d(gamma_init=G_init)
     self.flat = Flatten()
     self.dense1 = Linear(out_features=1024, act=tlx.LeakyReLU(alpha=0.2))
     self.dense2 = Linear(out_features=1)
Example #5
def make_layers(config, batch_norm=False, end_with='outputs'):
    layer_list = []
    is_end = False
    for layer_group_idx, layer_group in enumerate(config):
        if isinstance(layer_group, list):
            for idx, layer in enumerate(layer_group):
                layer_name = layer_names[layer_group_idx][idx]
                n_filter = layer
                if idx == 0:
                    if layer_group_idx > 0:
                        in_channels = config[layer_group_idx - 2][-1]
                    else:
                        in_channels = 3
                else:
                    in_channels = layer_group[idx - 1]
                layer_list.append(
                    Conv2d(
                        out_channels=n_filter, kernel_size=(3, 3), stride=(1, 1), act=tlx.ReLU, padding='SAME',
                        in_channels=in_channels, name=layer_name, data_format='channels_first'
                    )
                )
                if batch_norm:
                    layer_list.append(BatchNorm(num_features=n_filter, data_format='channels_first'))
                if layer_name == end_with:
                    is_end = True
                    break
        else:
            layer_name = layer_names[layer_group_idx]
            if layer_group == 'M':
                layer_list.append(MaxPool2d(kernel_size=(2, 2), stride=(2, 2), padding='SAME', name=layer_name, data_format='channels_first'))
            elif layer_group == 'O':
                layer_list.append(Linear(out_features=1000, in_features=4096, name=layer_name))
            elif layer_group == 'F':
                layer_list.append(Flatten(name='flatten'))
            elif layer_group == 'fc1':
                layer_list.append(Linear(out_features=4096, act=tlx.ReLU, in_features=512 * 7 * 7, name=layer_name))
            elif layer_group == 'fc2':
                layer_list.append(Linear(out_features=4096, act=tlx.ReLU, in_features=4096, name=layer_name))
            if layer_name == end_with:
                is_end = True
        if is_end:
            break
    return Sequential(layer_list)
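`make_layers` depends on two pieces of context not shown in the excerpt: the `config` argument (nested filter counts plus the markers 'M', 'F', 'fc1', 'fc2' and 'O') and the module-level `layer_names`, whose nesting must mirror it. Below is a hedged usage sketch with a VGG16-style layout; the concrete names and filter counts are illustrative assumptions, not taken from the excerpt:

# Illustrative only: `layer_names` must parallel the structure of `cfg`.
layer_names = [
    ['conv1_1', 'conv1_2'], 'pool1',
    ['conv2_1', 'conv2_2'], 'pool2',
    ['conv3_1', 'conv3_2', 'conv3_3'], 'pool3',
    ['conv4_1', 'conv4_2', 'conv4_3'], 'pool4',
    ['conv5_1', 'conv5_2', 'conv5_3'], 'pool5',
    'flatten', 'fc1_relu', 'fc2_relu', 'outputs',
]
cfg = [
    [64, 64], 'M', [128, 128], 'M', [256, 256, 256], 'M',
    [512, 512, 512], 'M', [512, 512, 512], 'M',
    'F', 'fc1', 'fc2', 'O',
]
vgg16_layers = make_layers(cfg, batch_norm=False, end_with='outputs')  # returns a Sequential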
Example #6
 def __init__(self):
     super(Vgg19_simple_api, self).__init__()
     """ conv1 """
     self.conv1 = Conv2d(out_channels=64,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         act=tlx.ReLU,
                         padding='SAME')
     self.conv2 = Conv2d(out_channels=64,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         act=tlx.ReLU,
                         padding='SAME')
     self.maxpool1 = MaxPool2d(kernel_size=(2, 2),
                               stride=(2, 2),
                               padding='SAME')
     """ conv2 """
     self.conv3 = Conv2d(out_channels=128,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         act=tlx.ReLU,
                         padding='SAME')
     self.conv4 = Conv2d(out_channels=128,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         act=tlx.ReLU,
                         padding='SAME')
     self.maxpool2 = MaxPool2d(kernel_size=(2, 2),
                               stride=(2, 2),
                               padding='SAME')
     """ conv3 """
     self.conv5 = Conv2d(out_channels=256,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         act=tlx.ReLU,
                         padding='SAME')
     self.conv6 = Conv2d(out_channels=256,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         act=tlx.ReLU,
                         padding='SAME')
     self.conv7 = Conv2d(out_channels=256,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         act=tlx.ReLU,
                         padding='SAME')
     self.conv8 = Conv2d(out_channels=256,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         act=tlx.ReLU,
                         padding='SAME')
     self.maxpool3 = MaxPool2d(kernel_size=(2, 2),
                               stride=(2, 2),
                               padding='SAME')
     """ conv4 """
     self.conv9 = Conv2d(out_channels=512,
                         kernel_size=(3, 3),
                         stride=(1, 1),
                         act=tlx.ReLU,
                         padding='SAME')
     self.conv10 = Conv2d(out_channels=512,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          act=tlx.ReLU,
                          padding='SAME')
     self.conv11 = Conv2d(out_channels=512,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          act=tlx.ReLU,
                          padding='SAME')
     self.conv12 = Conv2d(out_channels=512,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          act=tlx.ReLU,
                          padding='SAME')
     self.maxpool4 = MaxPool2d(kernel_size=(2, 2),
                               stride=(2, 2),
                               padding='SAME')  # (batch_size, 14, 14, 512)
     """ conv5 """
     self.conv13 = Conv2d(out_channels=512,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          act=tlx.ReLU,
                          padding='SAME')
     self.conv14 = Conv2d(out_channels=512,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          act=tlx.ReLU,
                          padding='SAME')
     self.conv15 = Conv2d(out_channels=512,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          act=tlx.ReLU,
                          padding='SAME')
     self.conv16 = Conv2d(out_channels=512,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          act=tlx.ReLU,
                          padding='SAME')
     self.maxpool5 = MaxPool2d(kernel_size=(2, 2),
                               stride=(2, 2),
                               padding='SAME')  # (batch_size, 7, 7, 512)
     """ fc 6~8 """
     self.flat = Flatten()
     self.dense1 = Linear(out_features=4096, act=tlx.ReLU)
     self.dense2 = Linear(out_features=4096, act=tlx.ReLU)
     self.dense3 = Linear(out_features=1000, act=tlx.identity)
Example #7
 def __init__(self, dim=64):
     super(SRGAN_d, self).__init__()
     self.conv1 = Conv2d(out_channels=dim,
                         kernel_size=(4, 4),
                         stride=(2, 2),
                         act=tlx.LeakyReLU,
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first')
     self.conv2 = Conv2d(out_channels=dim * 2,
                         kernel_size=(4, 4),
                         stride=(2, 2),
                         act=None,
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first',
                         b_init=None)
     self.bn1 = BatchNorm2d(num_features=dim * 2,
                            act=tlx.LeakyReLU,
                            gamma_init=G_init,
                            data_format='channels_first')
     self.conv3 = Conv2d(out_channels=dim * 4,
                         kernel_size=(4, 4),
                         stride=(2, 2),
                         act=None,
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first',
                         b_init=None)
     self.bn2 = BatchNorm2d(num_features=dim * 4,
                            act=tlx.LeakyReLU,
                            gamma_init=G_init,
                            data_format='channels_first')
     self.conv4 = Conv2d(out_channels=dim * 8,
                         kernel_size=(4, 4),
                         stride=(2, 2),
                         act=None,
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first',
                         b_init=None)
     self.bn3 = BatchNorm2d(num_features=dim * 8,
                            act=tlx.LeakyReLU,
                            gamma_init=G_init,
                            data_format='channels_first')
     self.conv5 = Conv2d(out_channels=dim * 16,
                         kernel_size=(4, 4),
                         stride=(2, 2),
                         act=None,
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first',
                         b_init=None)
     self.bn4 = BatchNorm2d(num_features=dim * 16,
                            act=tlx.LeakyReLU,
                            gamma_init=G_init,
                            data_format='channels_first')
     self.conv6 = Conv2d(out_channels=dim * 32,
                         kernel_size=(4, 4),
                         stride=(2, 2),
                         act=None,
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first',
                         b_init=None)
     self.bn5 = BatchNorm2d(num_features=dim * 32,
                            act=tlx.LeakyReLU,
                            gamma_init=G_init,
                            data_format='channels_first')
     self.conv7 = Conv2d(out_channels=dim * 16,
                         kernel_size=(1, 1),
                         stride=(1, 1),
                         act=None,
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first',
                         b_init=None)
     self.bn6 = BatchNorm2d(num_features=dim * 16,
                            act=tlx.LeakyReLU,
                            gamma_init=G_init,
                            data_format='channels_first')
     self.conv8 = Conv2d(out_channels=dim * 8,
                         kernel_size=(1, 1),
                         stride=(1, 1),
                         act=None,
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first',
                         b_init=None)
     self.bn7 = BatchNorm2d(num_features=dim * 8,
                            act=None,
                            gamma_init=G_init,
                            data_format='channels_first')
     self.conv9 = Conv2d(out_channels=dim * 2,
                         kernel_size=(1, 1),
                         stride=(1, 1),
                         act=None,
                         padding='SAME',
                         W_init=W_init,
                         data_format='channels_first',
                         b_init=None)
     self.bn8 = BatchNorm2d(num_features=dim * 2,
                            act=tlx.LeakyReLU,
                            gamma_init=G_init,
                            data_format='channels_first')
     self.conv10 = Conv2d(out_channels=dim * 2,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          act=None,
                          padding='SAME',
                          W_init=W_init,
                          data_format='channels_first',
                          b_init=None)
     self.bn9 = BatchNorm2d(num_features=dim * 2,
                            act=tlx.LeakyReLU,
                            gamma_init=G_init,
                            data_format='channels_first')
     self.conv11 = Conv2d(out_channels=dim * 8,
                          kernel_size=(3, 3),
                          stride=(1, 1),
                          act=None,
                          padding='SAME',
                          W_init=W_init,
                          data_format='channels_first',
                          b_init=None)
     self.bn10 = BatchNorm2d(num_features=dim * 8,
                             gamma_init=G_init,
                             data_format='channels_first')
     self.add = Elementwise(combine_fn=tlx.add, act=tlx.LeakyReLU)
     self.flat = Flatten()
     self.dense = Linear(out_features=1, W_init=W_init)
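The forward pass of SRGAN_d is not included in the excerpt. A plausible sketch, assuming the layers are applied in definition order, that Elementwise is called with a list of the two branch outputs, and that the shortcut is taken after bn7 where the channel count (dim * 8) matches the bn10 output; this ordering is inferred from the standard SRGAN discriminator and is not part of the original code:

 def forward(self, x):
     # Assumed sequential stack of strided conv + BN(LeakyReLU) stages.
     x = self.conv1(x)
     x = self.bn1(self.conv2(x))
     x = self.bn2(self.conv3(x))
     x = self.bn3(self.conv4(x))
     x = self.bn4(self.conv5(x))
     x = self.bn5(self.conv6(x))
     x = self.bn6(self.conv7(x))
     x = self.bn7(self.conv8(x))
     shortcut = x                        # dim * 8 channels, no activation on bn7
     x = self.bn8(self.conv9(x))
     x = self.bn9(self.conv10(x))
     x = self.bn10(self.conv11(x))       # back to dim * 8 channels
     x = self.add([shortcut, x])         # Elementwise(tlx.add) followed by LeakyReLU
     x = self.flat(x)
     return self.dense(x)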