Example #1
0
    def __init__(self,
                 in_channels,
                 mid_channels,
                 out_channels,
                 stride=2,
                 downsample_fb=False):
        """Bottleneck unit with a projection shortcut (conv4/bn4).

        Args:
            in_channels (int): Channels of the incoming feature map.
            mid_channels (int): Channels of the squeezed 3x3 stage.
            out_channels (int): Channels produced by the final 1x1 conv.
            stride (int): Downsampling stride applied once inside the unit.
            downsample_fb (bool): If True, follow the Facebook ResNet and
                put the stride on the 3x3 conv; otherwise follow the
                original MSRA ResNet and put it on the first 1x1 conv.
        """
        super().__init__()
        if downsample_fb:
            # Facebook ResNet: stride=2 sits on the 3x3 convolution.
            stride_1x1, stride_3x3 = 1, stride
        else:
            # Original MSRA ResNet: stride=2 sits on the 1x1 convolution.
            stride_1x1, stride_3x3 = stride, 1

        # Main path: 1x1 squeeze -> 3x3 -> 1x1 expand, each followed by BN.
        self.conv1 = L.Conv2d(in_channels, mid_channels, 1, stride_1x1, 0,
                              nobias=True)
        self.bn1 = L.BatchNorm()
        self.conv2 = L.Conv2d(mid_channels, mid_channels, 3, stride_3x3, 1,
                              nobias=True)
        self.bn2 = L.BatchNorm()
        self.conv3 = L.Conv2d(mid_channels, out_channels, 1, 1, 0, nobias=True)
        self.bn3 = L.BatchNorm()
        # Projection shortcut: matches the main path's channels and stride.
        self.conv4 = L.Conv2d(in_channels, out_channels, 1, stride, 0,
                              nobias=True)
        self.bn4 = L.BatchNorm()
Example #2
0
 def __init__(self, hidden_size=100):
     """Small CNN: two double-conv stages followed by two linear layers.

     Args:
         hidden_size (int): Output size of the first fully connected layer.
     """
     super().__init__()

     def conv3x3(channels):
         # Every convolution here shares the 3x3 / stride 1 / pad 1 shape.
         return L.Conv2d(channels, kernel_size=3, stride=1, pad=1)

     self.conv1_1 = conv3x3(16)
     self.conv1_2 = conv3x3(16)
     self.conv2_1 = conv3x3(32)
     self.conv2_2 = conv3x3(32)
     self.fc3 = L.Linear(hidden_size)
     self.fc4 = L.Linear(10)
Example #3
0
 def __init__(self, in_channels, mid_channels):
     """Bottleneck block that preserves its input channel count.

     Args:
         in_channels (int): Channels of both the input and the output.
         mid_channels (int): Channels of the squeezed middle 3x3 stage.
     """
     super().__init__()

     def conv1x1(n_in, n_out):
         # Pointwise convolution used to squeeze/expand the channel width.
         return L.Conv2d(n_in, n_out, 1, 1, 0, nobias=True)

     # Squeeze -> 3x3 -> expand back to in_channels; the output width
     # matches the input, as expected of a residual-style unit.
     self.conv1 = conv1x1(in_channels, mid_channels)
     self.bn1 = L.BatchNorm()
     self.conv2 = L.Conv2d(mid_channels, mid_channels, 3, 1, 1, nobias=True)
     self.bn2 = L.BatchNorm()
     self.conv3 = conv1x1(mid_channels, in_channels)
     self.bn3 = L.BatchNorm()
Example #4
0
 def __init__(self, latent_size):
     """Convolutional encoder ending in three linear heads.

     Args:
         latent_size (int): Output size of the last two linear layers
             (linear2 and linear3 both project to this size).
     """
     super().__init__()
     self.latent_size = latent_size

     def conv3x3(channels, stride):
         # All convolutions are 3x3 with padding 1; only stride varies.
         return L.Conv2d(channels, kernel_size=3, stride=stride, pad=1)

     self.conv1 = conv3x3(32, 1)
     self.conv2 = conv3x3(64, 2)  # the single downsampling layer
     self.conv3 = conv3x3(64, 1)
     self.conv4 = conv3x3(64, 1)
     self.linear1 = L.Linear(32)
     self.linear2 = L.Linear(latent_size)
     self.linear3 = L.Linear(latent_size)
Example #5
0
 def __init__(self, hidden_size=100):
     """Minimal model: one 3x3 conv, then two fully connected layers.

     Args:
         hidden_size (int): Output size of the intermediate linear layer.
     """
     super().__init__()
     self.conv1 = L.Conv2d(30, kernel_size=3, stride=1, pad=1)
     self.fc3 = L.Linear(hidden_size)
     # Final 10-way linear layer.
     self.fc5 = L.Linear(10)
Example #6
0
    def __init__(self, n_layers=152, pretrained=False):
        """Build a ResNet of 50, 101 or 152 layers.

        Args:
            n_layers (int): Network depth; must be 50, 101 or 152.
            pretrained (bool): If True, fetch and load pretrained weights.

        Raises:
            ValueError: If ``n_layers`` is not one of 50, 101 or 152.
        """
        super().__init__()

        # Number of residual units in each of the four stages.
        blocks_per_stage = {
            50: [3, 4, 6, 3],
            101: [3, 4, 23, 3],
            152: [3, 8, 36, 3],
        }
        if n_layers not in blocks_per_stage:
            raise ValueError('The n_layers argument should be either 50, 101,'
                             ' or 152, but {} was given.'.format(n_layers))
        block = blocks_per_stage[n_layers]

        # Stem conv, then four residual stages with increasing width.
        self.conv1 = L.Conv2d(3, 64, 7, 2, 3)
        self.bn1 = L.BatchNorm()
        self.res2 = BuildingBlock(block[0], 64, 64, 256, 1)
        self.res3 = BuildingBlock(block[1], 256, 128, 512, 2)
        self.res4 = BuildingBlock(block[2], 512, 256, 1024, 2)
        self.res5 = BuildingBlock(block[3], 1024, 512, 2048, 2)
        self.fc6 = L.Linear(1000)

        if pretrained:
            weights_path = utils.get_file(ResNet.WEIGHTS_PATH.format(n_layers))
            self.load_weights(weights_path)
Example #7
0
# GAN training setup: hyperparameters plus the generator/discriminator
# networks, built from DeZero layers.
use_gpu = dezero.cuda.gpu_enable
max_epoch = 5
batch_size = 128
hidden_size = 62  # dimensionality of the generator's input noise vector

# Shape (C, H, W) the generator's fully connected output is reshaped into
# before the deconvolution stack.
fc_channel, fc_height, fc_width = 128, 7, 7

# Generator: noise -> two FC+BN+ReLU stages -> reshape to a feature map ->
# two stride-2 deconvolutions -> single-channel sigmoid output in [0, 1].
gen = Sequential(L.Linear(1024), L.BatchNorm(), F.relu,
                 L.Linear(fc_channel * fc_height * fc_width), L.BatchNorm(),
                 F.relu,
                 lambda x: F.reshape(x, (-1, fc_channel, fc_height, fc_width)),
                 L.Deconv2d(fc_channel // 2, kernel_size=4, stride=2, pad=1),
                 L.BatchNorm(), F.relu,
                 L.Deconv2d(1, kernel_size=4, stride=2, pad=1), F.sigmoid)

# Discriminator: two stride-2 convolutions -> flatten -> FC stack ->
# single sigmoid real/fake score.
dis = Sequential(L.Conv2d(64, kernel_size=4, stride=2, pad=1), F.leaky_relu,
                 L.Conv2d(128, kernel_size=4, stride=2, pad=1),
                 L.BatchNorm(), F.leaky_relu, F.flatten, L.Linear(1024),
                 L.BatchNorm(), F.leaky_relu, L.Linear(1), F.sigmoid)


def init_weight(dis, gen, hidden_size):
    # Input dummy data to initialize weights
    batch_size = 1
    z = np.random.rand(batch_size, hidden_size)
    fake_images = gen(z)
    dis(fake_images)

    for l in dis.layers + gen.layers:
        classname = l.__class__.__name__
        if classname.lower() in ('conv2d', 'linear', 'deconv2d'):
Example #8
0
    def __init__(self, pretrained=False):
        """VGG16 with every layer's input channels spelled out explicitly.

        Args:
            pretrained (bool): If True, fetch and load pretrained weights.
        """
        super().__init__()

        def conv3x3(n_in, n_out):
            # Every VGG convolution is 3x3 with stride 1 and padding 1.
            return L.Conv2d(n_in, n_out, 3, 1, 1)

        self.conv1_1 = conv3x3(3, 64)
        self.conv1_2 = conv3x3(64, 64)
        self.conv2_1 = conv3x3(64, 128)
        self.conv2_2 = conv3x3(128, 128)
        self.conv3_1 = conv3x3(128, 256)
        self.conv3_2 = conv3x3(256, 256)
        self.conv3_3 = conv3x3(256, 256)
        self.conv4_1 = conv3x3(256, 512)
        self.conv4_2 = conv3x3(512, 512)
        self.conv4_3 = conv3x3(512, 512)
        self.conv5_1 = conv3x3(512, 512)
        self.conv5_2 = conv3x3(512, 512)
        self.conv5_3 = conv3x3(512, 512)
        self.fc6 = L.Linear(4096)
        self.fc7 = L.Linear(4096)
        self.fc8 = L.Linear(1000)

        if pretrained:
            weights_path = utils.get_file(VGG16.WEIGHTS_PATH)
            self.load_weights(weights_path)
Example #9
0
    def __init__(self, pretrained=False):
        """VGG16 whose conv layers infer their input channels lazily.

        Args:
            pretrained (bool): If True, fetch and load pretrained weights.
        """
        super().__init__()
        # (attribute suffix, output channels) for the thirteen 3x3 convs.
        conv_channels = [
            ('1_1', 64), ('1_2', 64),
            ('2_1', 128), ('2_2', 128),
            ('3_1', 256), ('3_2', 256), ('3_3', 256),
            ('4_1', 512), ('4_2', 512), ('4_3', 512),
            ('5_1', 512), ('5_2', 512), ('5_3', 512),
        ]
        for suffix, channels in conv_channels:
            setattr(self, 'conv' + suffix,
                    L.Conv2d(channels, kernel_size=3, stride=1, pad=1))
        self.fc6 = L.Linear(4096)
        self.fc7 = L.Linear(4096)
        self.fc8 = L.Linear(1000)

        if pretrained:
            weights_path = utils.get_file(VGG16.WEIGHTS_PATH)
            self.load_weights(weights_path)
Example #10
0
 def __init__(self):
     """Decoder: linear projection, then deconv upsampling to one channel."""
     super().__init__()
     # Feature-map shape (C, H, W) the linear output is reshaped into.
     self.to_shape = (64, 14, 14)
     self.linear = L.Linear(np.prod(self.to_shape))
     # Stride-2 deconvolution doubles the spatial size; the trailing 3x3
     # convolution collapses the result to a single channel.
     self.deconv = L.Deconv2d(32, kernel_size=4, stride=2, pad=1)
     self.conv = L.Conv2d(1, kernel_size=3, stride=1, pad=1)
Example #11
0
import dezero.layers as L
from dezero import DataLoader
from dezero.models import Sequential
from dezero.optimizers import Adam

# Seed feature-map shape for the generator: the first linear layer's
# output is reshaped to (-1, C, H, W) before the deconvolution stack.
C, H, W = 512, 3, 3
# Generator: FC -> reshape -> three stride-2 deconvolutions that halve
# the channel count at each step -> single-channel sigmoid output.
G = Sequential(L.Linear(C * H * W), F.Reshape((-1, C, H, W)), L.BatchNorm(),
               F.ReLU(), L.Deconv2d(C // 2, kernel_size=2, stride=2, pad=1),
               L.BatchNorm(), F.ReLU(),
               L.Deconv2d(C // 4,
                          kernel_size=2, stride=2, pad=1), L.BatchNorm(),
               F.ReLU(), L.Deconv2d(C // 8, kernel_size=2, stride=2, pad=1),
               L.BatchNorm(), F.ReLU(),
               L.Deconv2d(1, kernel_size=3, stride=3, pad=1), F.Sigmoid())

# Discriminator: four strided convolutions with widening channels ->
# flatten -> single linear score (no sigmoid; raw logit output).
D = Sequential(L.Conv2d(64, kernel_size=3, stride=3, pad=1), F.LeakyReLU(0.1),
               L.Conv2d(128, kernel_size=2, stride=2, pad=1), L.BatchNorm(),
               F.LeakyReLU(0.1), L.Conv2d(256, kernel_size=2, stride=2, pad=1),
               L.BatchNorm(), F.LeakyReLU(0.1),
               L.Conv2d(512, kernel_size=2, stride=2, pad=1), L.BatchNorm(),
               F.LeakyReLU(0.1), F.flatten, L.Linear(1))

# Name the first conv's parameters — presumably for logging or weight
# inspection later in the script; confirm against the full source.
D.layers[0].W.name = 'conv1_W'
D.layers[0].b.name = 'conv1_b'


def init_weight(D, G, hidden_size):
    """Push one dummy batch through G so lazy layers allocate their weights.

    NOTE(review): as visible here, ``D`` and ``fake_images`` are never used —
    this snippet looks truncated; the full version presumably also feeds
    ``fake_images`` through ``D`` to initialize it as well. Confirm.
    """
    # dummy data
    batch_size = 1
    z = np.random.rand(batch_size, hidden_size)
    fake_images = G(z)
Example #12
0
 def __init__(self):
     """VGG16 whose conv layers infer their input channels lazily."""
     super().__init__()

     def conv3x3(channels):
         # Every VGG convolution is 3x3 with stride 1 and padding 1.
         return L.Conv2d(channels, kernel_size=3, stride=1, pad=1)

     self.conv1_1 = conv3x3(64)
     self.conv1_2 = conv3x3(64)
     self.conv2_1 = conv3x3(128)
     self.conv2_2 = conv3x3(128)
     self.conv3_1 = conv3x3(256)
     self.conv3_2 = conv3x3(256)
     self.conv3_3 = conv3x3(256)
     self.conv4_1 = conv3x3(512)
     self.conv4_2 = conv3x3(512)
     self.conv4_3 = conv3x3(512)
     self.conv5_1 = conv3x3(512)
     self.conv5_2 = conv3x3(512)
     self.conv5_3 = conv3x3(512)
     self.fc6 = L.Linear(4096)
     self.fc7 = L.Linear(4096)
     self.fc8 = L.Linear(1000)
 def __init__(self):
     """VGG16 with explicit input/output sizes on every layer."""
     super().__init__()

     def conv3x3(n_in, n_out):
         # Every VGG convolution is 3x3 with stride 1 and padding 1.
         return L.Conv2d(n_in, n_out, 3, 1, 1)

     self.conv1_1 = conv3x3(3, 64)
     self.conv1_2 = conv3x3(64, 64)
     self.conv2_1 = conv3x3(64, 128)
     self.conv2_2 = conv3x3(128, 128)
     self.conv3_1 = conv3x3(128, 256)
     self.conv3_2 = conv3x3(256, 256)
     self.conv3_3 = conv3x3(256, 256)
     self.conv4_1 = conv3x3(256, 512)
     self.conv4_2 = conv3x3(512, 512)
     self.conv4_3 = conv3x3(512, 512)
     self.conv5_1 = conv3x3(512, 512)
     self.conv5_2 = conv3x3(512, 512)
     self.conv5_3 = conv3x3(512, 512)
     # Classifier head; 512 * 7 * 7 is the flattened conv5 output size.
     self.fc6 = L.Linear(512 * 7 * 7, 4096)
     self.fc7 = L.Linear(4096, 4096)
     self.fc8 = L.Linear(4096, 1000)