Example #1
    def __init__(self, block, layers, num_classes=10):
        super(FlatResNet32, self).__init__()

        self.inplanes = 16
        self.conv1 = base.conv3x3(3, 16)
        self.bn1 = nn.BatchNorm2d(16)
        self.relu = nn.ReLU(inplace=True)
        self.avgpool = nn.AvgPool2d(8)

        # Three CIFAR-style stages: 16/32/64 filters, downsampling at the
        # start of stages two and three.
        strides = [1, 2, 2]
        filt_sizes = [16, 32, 64]
        self.blocks, self.ds = [], []
        for idx, (filt_size, num_blocks,
                  stride) in enumerate(zip(filt_sizes, layers, strides)):
            blocks, ds = self._make_layer(block,
                                          filt_size,
                                          num_blocks,
                                          stride=stride)
            self.blocks.append(nn.ModuleList(blocks))
            self.ds.append(ds)

        self.blocks = nn.ModuleList(self.blocks)
        self.ds = nn.ModuleList(self.ds)
        self.fc = nn.Linear(64 * block.expansion, num_classes)
        self.fc_dim = 64 * block.expansion

        self.layer_config = layers

        # He (Kaiming) initialization for convolutions; BatchNorm layers start
        # as the identity (weight 1, bias 0).
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
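A minimal usage sketch, not from the source: it assumes a standard BasicBlock
with expansion == 1 is available as base.BasicBlock, that layers=[5, 5, 5]
gives the usual ResNet-32 depth, and that the class defines the usual
forward() elsewhere.

    import torch

    model = FlatResNet32(base.BasicBlock, layers=[5, 5, 5], num_classes=10)
    x = torch.randn(2, 3, 32, 32)   # a CIFAR-sized batch
    out = model(x)                  # expected shape: (2, 10)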
Example #2
    def __init__(self, input_dim, output_dim, norm_layer, use_dropout, gpu_ids, downsample=False):
        super().__init__(input_dim, None, norm_layer, use_dropout, gpu_ids)
        self.inplanes = 64
        self.downsample = downsample

        stride = 2 if self.downsample else 1

        # Layer names follow the CycleGAN shorthand: c7s1-k is a 7x7 stride-1
        # conv with k filters, dk a stride-2 downsampling conv, Rk a residual
        # block, and uk an upsampling block.
        self.local_ds = nn.Sequential(OrderedDict([
            ('c7s1-32', nn.Sequential(
                nn.ReflectionPad2d(3),
                nn.Conv2d(input_dim, 32, 7),
                self.norm_layer(32),
                self.activation)),
            ('d64', nn.Sequential(
                conv3x3(32, 64, stride=stride),
                self.norm_layer(64),
                self.activation)),
        ]))

        # Upsample back to full resolution only if the input was downsampled.
        conv = (nn.ConvTranspose2d(64, 32, 3, 2, 1, output_padding=1)
                if self.downsample else conv3x3(64, 32))
        self.local_us = nn.Sequential(OrderedDict([
            ('R64', self._make_layer(self.block, 64, 3)),
            ('u32', nn.Sequential(
                conv,
                self.norm_layer(32),
                self.activation)),
            ('c7s1-3', nn.Sequential(
                nn.ReflectionPad2d(3),
                nn.Conv2d(32, output_dim, 7),
                nn.Tanh()))
        ]))
        initialize_weights(self, nn.init.kaiming_normal_)
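A hypothetical instantiation sketch (the enclosing class name is not shown
above; LocalEnhancer is a placeholder, and nn.InstanceNorm2d is one plausible
norm_layer):

    import torch
    from torch import nn

    net = LocalEnhancer(input_dim=3, output_dim=3,
                        norm_layer=nn.InstanceNorm2d, use_dropout=False,
                        gpu_ids=[], downsample=True)
    feat = net.local_ds(torch.randn(1, 3, 256, 256))  # 64 ch at 1/2 resolution
    img = net.local_us(feat)                          # back to (1, 3, 256, 256)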
Example #3
    def __init__(self, block, layers, num_classes=[10]):  # one entry per task
        super(FlatResNet26, self).__init__()

        nb_tasks = len(num_classes)
        factor = config_task.factor  # global width multiplier
        self.in_planes = int(32 * factor)
        self.conv1 = conv3x3(3, int(32 * factor))
        self.bn1 = nn.BatchNorm2d(int(32 * factor))

        strides = [2, 2, 2]
        filt_sizes = [64, 128, 256]
        self.blocks, self.ds = [], []

        for idx, (filt_size, num_blocks,
                  stride) in enumerate(zip(filt_sizes, layers, strides)):
            blocks, ds = self._make_layer(block,
                                          filt_size,
                                          num_blocks,
                                          stride=stride)
            self.blocks.append(nn.ModuleList(blocks))
            self.ds.append(ds)

        self.blocks = nn.ModuleList(self.blocks)
        self.ds = nn.ModuleList(self.ds)

        # One BatchNorm+ReLU head per task, followed by a per-task classifier.
        self.bns = nn.ModuleList([
            nn.Sequential(nn.BatchNorm2d(int(256 * factor)), nn.ReLU(True))
            for i in range(nb_tasks)
        ])
        self.avgpool = nn.AdaptiveAvgPool2d(1)

        self.linears = nn.ModuleList([
            nn.Linear(int(256 * factor), num_classes[i])
            for i in range(nb_tasks)
        ])

        self.layer_config = layers
        # Same He/identity initialization as in Example #1.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
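A sketch of multi-task construction, under stated assumptions: config_task.factor
has been set (e.g. to 1.0), a BasicBlock with expansion == 1 is in scope, and
layers=[4, 4, 4] gives the 26-layer depth the class name suggests.

    model = FlatResNet26(BasicBlock, layers=[4, 4, 4], num_classes=[10, 100])
    # Two heads: model.linears[0] outputs 10 logits, model.linears[1] 100.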
Example #4
    def __init__(self, block, layers, num_classes=10):
        super(FlatResNet32, self).__init__()

        self.inplanes = 16
        self.conv1 = base.conv3x3(3, 16)
        # TensorFlow/Keras counterparts of the PyTorch layers in Example #1.
        self.bn1 = tf.keras.layers.BatchNormalization()
        self.relu = tf.keras.layers.ReLU()
        self.avgpool = tf.keras.layers.AveragePooling2D((8, 8), padding='same')

        strides = [1, 2, 2]
        filt_sizes = [16, 32, 64]
        self.blocks, self.ds = [], []  # plain lists; Keras tracks nested layers
        for idx, (filt_size, num_blocks, stride) in enumerate(zip(filt_sizes, layers, strides)):
            blocks, ds = self._make_layer(block, filt_size, num_blocks, stride=stride)
            self.blocks.append(blocks)
            self.ds.append(ds)

        # Dense infers its input width at build time, so only the output
        # dimension is needed.
        self.fc = tf.keras.layers.Dense(num_classes)
        self.fc_dim = 64 * block.expansion

        self.layer_config = layers
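A sketch of exercising the TensorFlow variant, assuming base.conv3x3 returns a
Keras layer here and that _make_layer and call() mirror the PyTorch version:

    import tensorflow as tf

    model = FlatResNet32(base.BasicBlock, layers=[5, 5, 5], num_classes=10)
    x = tf.random.normal((2, 32, 32, 3))  # NHWC layout in TensorFlow
    logits = model(x)                     # expected shape: (2, 10)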
Example #5
    def __init__(self, input_dim, output_dim, norm_layer, dropout, gpu_ids):
        super().__init__(layers=[9])
        self.norm_layer = norm_layer
        self.gpu_ids = gpu_ids
        self.activation = nn.ReLU(inplace=True)
        self.inplanes = 1024

        # Reinterpret the boolean flag as a dropout probability (or disable).
        dropout = 0.5 if dropout else None

        # Encoder: a c7s1 stem, four stride-2 downsampling convs, then nine
        # residual blocks at 1/16 resolution (naming as in Example #2).
        encoder = nn.Sequential(OrderedDict([
            ('c7s1-64', nn.Sequential(
                nn.ReflectionPad2d(3),
                nn.Conv2d(input_dim, 64, 7),
                self.norm_layer(64),
                self.activation)),
            ('d128', nn.Sequential(
                conv3x3(64, 128, stride=2),
                self.norm_layer(128),
                self.activation)),
            ('d256', nn.Sequential(
                conv3x3(128, 256, stride=2),
                self.norm_layer(256),
                self.activation)),
            ('d512', nn.Sequential(
                conv3x3(256, 512, stride=2),
                self.norm_layer(512),
                self.activation)),
            ('d1024', nn.Sequential(
                conv3x3(512, 1024, stride=2),
                self.norm_layer(1024),
                self.activation)),
            ('R1024', self._make_layer(
                self.block, 1024, self.layers[0], dropout=dropout)),
        ]))

        # Decoder: mirror of the encoder with four stride-2 transposed convs.
        decoder_layers = OrderedDict([
            ('u512', nn.Sequential(
                nn.ConvTranspose2d(
                    1024, 512, 3, 2, padding=1, output_padding=1),
                self.norm_layer(512),
                self.activation)),
            ('u256', nn.Sequential(
                nn.ConvTranspose2d(
                    512, 256, 3, 2, padding=1, output_padding=1),
                self.norm_layer(256),
                self.activation)),
            ('u128', nn.Sequential(
                nn.ConvTranspose2d(
                    256, 128, 3, 2, padding=1, output_padding=1),
                self.norm_layer(128),
                self.activation)),
            ('u64', nn.Sequential(
                nn.ConvTranspose2d(128, 64, 3, 2, padding=1, output_padding=1),
                self.norm_layer(64),
                self.activation)),
        ])
        # Append the final c7s1 projection only when a concrete output channel
        # count is given.
        if isinstance(output_dim, int):
            decoder_layers['c7s1-N'] = nn.Sequential(
                nn.ReflectionPad2d(3),
                nn.Conv2d(64, output_dim, 7),
                nn.Tanh())

        decoder = nn.Sequential(decoder_layers)
        self.global_gen = nn.Sequential(OrderedDict([('encoder', encoder), ('decoder', decoder)]))
        initialize_weights(self, nn.init.kaiming_normal_)
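A hypothetical end-to-end sketch (the enclosing class name is not shown;
GlobalGenerator is a placeholder). With four stride-2 downsamples, input
height and width should be divisible by 16:

    import torch
    from torch import nn

    net = GlobalGenerator(input_dim=3, output_dim=3,
                          norm_layer=nn.InstanceNorm2d, dropout=False,
                          gpu_ids=[])
    out = net.global_gen(torch.randn(1, 3, 256, 256))  # -> (1, 3, 256, 256)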