Example #1
 def __init__(self, in_channels, expansion=2, bn_args={'momentum': 0.01}):
     super().__init__()
     channels = in_channels // 4
     out_channels = in_channels * expansion
     self.bn_shrink = nn.BatchNorm2d(in_channels, **bn_args)
     self.activation_shrink = nn.ReLU6()
     self.conv_shrink = nn.Conv2d(in_channels, channels, 1)
     self.bn_3x3 = nn.BatchNorm2d(channels, **bn_args)
     self.activation_3x3 = nn.ReLU6()
     self.conv_3x3 = nn.Conv2d(channels, channels, 3, stride=2, padding=1)
     self.bn_expand = nn.BatchNorm2d(channels, **bn_args)
     self.activation_expand = nn.ReLU6()
     self.conv_expand = nn.Conv2d(channels, out_channels, 1)
     self.conv_shortcut = nn.Conv2d(in_channels,
                                    out_channels,
                                    3,
                                    stride=2,
                                    padding=1)
     # Parameter initialization
     for layer in [
             self.conv_expand, self.conv_3x3, self.conv_shrink,
             self.conv_shortcut
     ]:
         init.kaiming_uniform_(layer.weight,
                               mode='fan_in',
                               nonlinearity='relu')
         init.zeros_(layer.bias)
     for layer in [self.bn_expand, self.bn_3x3, self.bn_shrink]:
         init.ones_(layer.weight)
         init.zeros_(layer.bias)
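The constructor above only declares the layers; each BatchNorm is sized to its conv's input, which implies pre-activation ordering (BN, then ReLU6, then conv). A minimal forward sketch consistent with those names, assuming the strided 3x3 shortcut carries the identity path (hypothetical, not shown in the source):

 def forward(self, x):
     # Hypothetical wiring: pre-activation at each stage, plus a strided
     # 3x3 shortcut so both paths downsample and widen identically.
     out = self.conv_shrink(self.activation_shrink(self.bn_shrink(x)))
     out = self.conv_3x3(self.activation_3x3(self.bn_3x3(out)))
     out = self.conv_expand(self.activation_expand(self.bn_expand(out)))
     return out + self.conv_shortcut(x)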
Example #2
 def __init__(self,
              in_channels,
              repeats,
              expansions,
              features_num=1,
              bn_args={'momentum': 0.01}):
     super().__init__()
     channels = 64
     self.features_num = features_num
     self.grid_size = 2**(len(repeats) + 1)
     self.conv_init = nn.Conv2d(in_channels,
                                channels,
                                kernel_size=7,
                                stride=2,
                                padding=3)
     self.bn_init = nn.BatchNorm2d(channels, **bn_args)
     self.activation_init = nn.ReLU6()
     self.blocks = nn.ModuleList()
     for reps, expansion in zip(repeats, expansions):
         self.blocks.append(Block(channels, reps, expansion, bn_args))
         channels = channels * expansion
     self.bn_final = nn.BatchNorm2d(channels, **bn_args)
     self.activation_final = nn.ReLU6()
     # Parameter initialization
     init.kaiming_uniform_(self.conv_init.weight,
                           mode='fan_in',
                           nonlinearity='relu')
     init.zeros_(self.conv_init.bias)
     init.ones_(self.bn_init.weight)
     init.zeros_(self.bn_init.bias)
     init.ones_(self.bn_final.weight)
     init.zeros_(self.bn_final.bias)
Example #3
 def reset_parameters(self):
     init.kaiming_uniform_(self.weight, a=math.sqrt(5))
     init.ones_(self.scale)
     if self.bias is not None:
         fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
         bound = 1 / math.sqrt(fan_in)
         init.uniform_(self.bias, -bound, bound)
Example #4
def init_bn(model):
    if type(model) in [torch.nn.InstanceNorm2d, torch.nn.BatchNorm2d]:
        init.ones_(model.weight)
        init.zeros_(model.bias)

    elif type(model) in [torch.nn.Conv2d]:
        init.kaiming_uniform_(model.weight)
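Helpers like this are written to be passed to nn.Module.apply, which calls the function on every submodule. A minimal usage sketch, assuming the usual import torch.nn as nn:

model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16))
model.apply(init_bn)  # visits every submodule recursively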
Example #5
 def init_scale(self, force_init=False):
     if force_init:
         self.scale = nn.Parameter(
             torch.Tensor(self.conv.out_channels).to(self.weight.device))
         init.ones_(self.scale)
     else:
         self.scale = None
Example #6
 def __init__(self,
              in_channels,
              out_channels,
              stride=2,
              expansion=6,
              bn_args={'momentum': 0.01}):
     super().__init__()
     channels = expansion * in_channels
     self.conv_expand = nn.Conv2d(in_channels, channels, 1)
     self.bn_expand = nn.BatchNorm2d(channels, **bn_args)
     self.activation_expand = nn.ReLU6()
     self.conv_dwise = nn.Conv2d(channels,
                                 channels,
                                 3,
                                 stride=stride,
                                 padding=1,
                                 groups=channels)
     self.bn_dwise = nn.BatchNorm2d(channels, **bn_args)
     self.activation_dwise = nn.ReLU6()
     self.conv_shrink = nn.Conv2d(channels, out_channels, 1)
     self.bn_shrink = nn.BatchNorm2d(out_channels, **bn_args)
     # Parameter initialization
     for layer in [self.conv_expand, self.conv_dwise, self.conv_shrink]:
         init.kaiming_uniform_(layer.weight,
                               mode='fan_in',
                               nonlinearity='relu')
         init.zeros_(layer.bias)
     for layer in [self.bn_expand, self.bn_dwise, self.bn_shrink]:
         init.ones_(layer.weight)
         init.zeros_(layer.bias)
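This is a MobileNetV2-style inverted residual: expand with a 1x1 conv, filter with a depthwise 3x3, then project back through a linear (activation-free) bottleneck. A plausible forward sketch for the layers above; the constructor does not keep stride or the channel counts, so this version omits the usual stride-1 skip connection:

 def forward(self, x):
     out = self.activation_expand(self.bn_expand(self.conv_expand(x)))
     out = self.activation_dwise(self.bn_dwise(self.conv_dwise(out)))
     # Linear bottleneck: no activation after the projection conv.
     return self.bn_shrink(self.conv_shrink(out))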
Example #7
    def __init__(self,
                 in_channels,
                 features_num=1,
                 out_channels=(32, 16, 24, 32, 64, 96, 160, 320),
                 repeats=(1, 2, 3, 4, 3, 3, 1),
                 strides=(1, 2, 2, 2, 1, 2, 1),
                 expansions=(1, 6, 6, 6, 6, 6, 6),
                 bn_args={'momentum': 0.01}):

        super().__init__()
        self.grid_size = np.prod(strides) * 2
        self.features_num = features_num
        self.conv_init = nn.Conv2d(in_channels,
                                   out_channels[0],
                                   3,
                                   stride=2,
                                   padding=1)
        self.bn_init = nn.BatchNorm2d(out_channels[0], **bn_args)
        self.activation_init = nn.ReLU6()
        self.blocks = nn.ModuleList()
        for i in range(len(expansions)):
            self.blocks.append(
                Block(out_channels[i], out_channels[i + 1], repeats[i],
                      strides[i], expansions[i], bn_args))
        # Parameter initialization
        init.kaiming_uniform_(self.conv_init.weight,
                              mode='fan_in',
                              nonlinearity='relu')
        init.zeros_(self.conv_init.bias)
        init.ones_(self.bn_init.weight)
        init.zeros_(self.bn_init.bias)
Example #8
    def __init__(self,
                 in_channels,
                 features_num=1,
                 bn_args={'momentum': 0.01}):

        super().__init__()
        self.features_num = features_num
        self.conv_0 = nn.Conv2d(in_channels, 32, 3, stride=2, padding=1)
        self.bn_0 = nn.BatchNorm2d(32, **bn_args)
        self.relu_0 = nn.ReLU6()
        self.conv_1 = nn.Conv2d(32, 64, 3, padding=1)
        self.bn_1 = nn.BatchNorm2d(64, **bn_args)
        self.entryflow_1 = StridedBlock(64, 128, bn_args)
        self.entryflow_2 = StridedBlock(128, 256, bn_args)
        self.entryflow_3 = StridedBlock(256, 728, bn_args)
        self.middleflow = nn.Sequential(
            *[Block(728, 728, bn_args) for i in range(8)])
        self.exitflow = nn.Sequential(StridedBlock(728, 1024, bn_args),
                                      SeparableConv2d(1024, 1536, bn_args),
                                      nn.ReLU6(),
                                      SeparableConv2d(1536, 2048, bn_args))
        # Parameter initialization
        init.kaiming_uniform_(self.conv_0.weight,
                              mode='fan_in',
                              nonlinearity='relu')
        init.zeros_(self.conv_0.bias)
        init.kaiming_uniform_(self.conv_1.weight,
                              mode='fan_in',
                              nonlinearity='relu')
        init.zeros_(self.conv_1.bias)
        init.ones_(self.bn_0.weight)
        init.zeros_(self.bn_0.bias)
        init.ones_(self.bn_1.weight)
        init.zeros_(self.bn_1.bias)
Example #9
    def build(self, input_shape: TensorShape):
        if not self._built:
            if self.affine:
                self.weight = Parameter(torch.Tensor(self.input_filters)).to(
                    get_device())
                self.bias = Parameter(torch.Tensor(self.input_filters)).to(
                    get_device())
                init.ones_(self.weight)
                init.zeros_(self.bias)
            else:
                self.register_parameter('weight', None)
                self.register_parameter('bias', None)

            if self.track_running_stats:
                self.register_buffer(
                    'running_mean',
                    torch.zeros(self.input_filters).to(get_device()))
                self.register_buffer(
                    'running_var',
                    torch.ones(self.input_filters).to(get_device()))
                self.register_buffer(
                    'num_batches_tracked',
                    torch.tensor(0, dtype=torch.long).to(get_device()))
            else:
                self.register_parameter('running_mean', None)
                self.register_parameter('running_var', None)
                self.register_parameter('num_batches_tracked', None)
            self.reset_running_stats()
            self.to(get_device())
            self._built = True
Example #10
 def __init__(self,
              in_channels,
              kernel_size,
              expansion=3,
              bn_args={'momentum': 0.01}):
     super().__init__()
     channels = expansion * in_channels
     self.conv_expand = nn.Conv2d(in_channels, channels, 1)
     self.bn_expand = nn.BatchNorm2d(channels, **bn_args)
     self.relu_expand = nn.ReLU6()
     self.conv_dw = nn.Conv2d(channels,
                              channels,
                              kernel_size,
                              padding=kernel_size // 2,
                              groups=channels)
     self.bn_dw = nn.BatchNorm2d(channels, **bn_args)
     self.relu_dw = nn.ReLU6()
     self.conv_shrink = nn.Conv2d(channels, in_channels, 1)
     self.bn_shrink = nn.BatchNorm2d(in_channels, **bn_args)
     # Parameter initialization
     for layer in [self.conv_expand, self.conv_dw, self.conv_shrink]:
         init.kaiming_uniform_(layer.weight,
                               mode='fan_in',
                               nonlinearity='relu')
         init.zeros_(layer.bias)
     for layer in [self.bn_expand, self.bn_dw, self.bn_shrink]:
         init.ones_(layer.weight)
         init.zeros_(layer.bias)
Example #11
 def init_scale(self, force_init=False):
     if force_init:
         self.scale = nn.Parameter(
             torch.Tensor(self.linear.out_features).to(self.weight.device))
         init.ones_(self.scale)
     else:
         self.scale = None
Example #12
 def __init__(self,
              in_channels,
              out_channels,
              kernel_size,
              stride,
              bn_args={'momentum': 0.01}):
     super().__init__()
     self.conv_dw = nn.Conv2d(in_channels,
                              in_channels,
                              kernel_size,
                              stride=stride,
                              padding=kernel_size // 2,
                              groups=in_channels)
     self.bn_dw = nn.BatchNorm2d(in_channels, **bn_args)
     self.relu_dw = nn.ReLU6()
     self.conv_1x1 = nn.Conv2d(in_channels, out_channels, 1)
     self.bn_1x1 = nn.BatchNorm2d(out_channels, **bn_args)
     # Parameter initialization
     for layer in [self.conv_dw, self.conv_1x1]:
         init.kaiming_uniform_(layer.weight,
                               mode='fan_in',
                               nonlinearity='relu')
         init.zeros_(layer.bias)
     for layer in [self.bn_dw, self.bn_1x1]:
         init.ones_(layer.weight)
         init.zeros_(layer.bias)
Example #13
def init_gamma(m):
    if isinstance(m, nn.Embedding):
        init.ones_(m.weight.data)
        init.ones_(m.weight.data)

    if isinstance(m, nn.BatchNorm2d) and m.weight is not None:
        init.ones_(m.weight.data)
Example #14
 def __init__(self, in_dim, out_dim, ini):
     super().__init__()
     self.in_dim = in_dim
     self.out_dim = out_dim
     self.eps = 1e-10
     self.initial = ini
     self.G = Parameter(torch.DoubleTensor(out_dim, in_dim))
     self.nac = NeuralAccumulatorCell(in_dim, out_dim, ini)
     self.bias = Parameter(torch.DoubleTensor(1, out_dim))

     if ini == 'Kai_uni':
         init.kaiming_uniform_(self.G, a=math.sqrt(5))
         init.kaiming_uniform_(self.bias)
     elif ini == 'Xav_norm':
         init.xavier_normal_(self.G)
         init.xavier_normal_(self.bias)
     elif ini == 'Kai_norm':
         init.kaiming_normal_(self.G)
         init.kaiming_normal_(self.bias)
     elif ini == 'Zeros':
         init.zeros_(self.G)
         init.zeros_(self.bias)
     elif ini == 'Ones':
         init.ones_(self.G)
         init.ones_(self.bias)
Example #15
    def _reset_parameters(self):
        init.xavier_normal_(self.q_weight)
        init.zeros_(self.q_bias)
        if self.symmetric:
            pass
        else:
            init.xavier_normal_(self.k_weight)
            init.zeros_(self.k_bias)
        if self.cum_value:
            init.ones_(self.v_weight)
            init.xavier_normal_(self.o_weight)
        else:
            orth = Tensor(self.d_hidden, self.pred_size)
            init.orthogonal_(orth)
            with pt.no_grad():
                self.v_weight.copy_(orth.unsqueeze(dim=1))
                self.o_weight.copy_(orth.T.repeat(self.n_output, 1))
        init.zeros_(self.v_bias)
        init.zeros_(self.o_bias)
        if self.add_autoreg:
            init.constant_(self.m_weight, 1.0 / self.cond_size)
            init.zeros_(self.m_bias)

        init.xavier_normal_(self.r_key)
        if self.add_autoreg and self.fix_ar_key:
            init.xavier_normal_(self.m_key)
Example #16
 def _init_weight(self):
     for m in self.modules():
         if isinstance(m, nn.Conv2d):
             init.kaiming_normal_(m.weight)
         elif isinstance(m, nn.BatchNorm2d):
             init.ones_(m.weight)
             init.zeros_(m.bias)
Example #17
 def weights_init(m):
     if isinstance(m, (nn.Conv2d, nn.Linear, nn.ConvTranspose2d)):
         init.xavier_normal_(m.weight, gain=0.02)
         if m.bias is not None:
             init.zeros_(m.bias)
     elif isinstance(m, nn.InstanceNorm2d) and m.affine:
         init.ones_(m.weight)
         init.zeros_(m.bias)
Example #18
 def weights_init(m):
     if isinstance(m, (nn.Conv2d, nn.Linear)):
         init.normal_(m.weight, std=0.1)
         if m.bias is not None:
             init.zeros_(m.bias)
     elif isinstance(m, nn.BatchNorm2d):
         init.ones_(m.weight)
         init.zeros_(m.bias)
Example #19
 def reset_parameters(self):
     self.reset_running_stats()
     if self.affine:
         init.ones_(self.weight[0, 0])
         init.zeros_(self.weight[1, 0])
         init.zeros_(self.weight[0, 1])
         init.ones_(self.weight[1, 1])
         init.zeros_(self.bias)
Example #20
 def weights_init(m):
     if isinstance(m, nn.Conv1d):
         init.kaiming_uniform_(m.weight)
         if m.bias is not None:
             init.zeros_(m.bias)
     elif isinstance(m, nn.BatchNorm1d):
         init.ones_(m.weight)
         init.zeros_(m.bias)
Example #21
 def reset_running_stats(self):
     if self.track_running_stats:
         self.running_mean.zero_()
         self.running_var.fill_(1)
         self.num_batches_tracked.zero_()
     if self.affine:
         init.ones_(self.weight)
         init.zeros_(self.bias)
Example #22
 def weights_init(m):
     if isinstance(m, nn.Conv2d):
         init.kaiming_normal_(m.weight)
         if m.bias is not None:
             init.zeros_(m.bias)
     if isinstance(m, nn.BatchNorm2d):
         init.ones_(m.weight)
         init.zeros_(m.bias)
Example #23
 def reset_parameters(self):
     self.reset_running_stats()
     if self.affine:
         if self.affine_flag == 2:
             init.uniform_(self.weight)
             init.zeros_(self.bias)
         elif self.affine_flag == 1:
             init.ones_(self.weight)
             init.zeros_(self.bias)
Example #24
    def __init__(self,
                 startf=32,
                 maxf=256,
                 layer_count=3,
                 latent_size=128,
                 channels=3):
        super(Generator, self).__init__()
        self.maxf = maxf
        self.startf = startf
        self.layer_count = layer_count

        self.channels = channels
        self.latent_size = latent_size

        mul = 2**(self.layer_count - 1)

        inputs = min(self.maxf, startf * mul)
        self.const = Parameter(torch.Tensor(1, inputs, 4, 4))
        init.ones_(self.const)

        self.layer_to_resolution = [0 for _ in range(layer_count)]
        resolution = 2

        self.style_sizes = []

        to_rgb = nn.ModuleList()

        self.decode_block: nn.ModuleList[DecodeBlock] = nn.ModuleList()
        for i in range(self.layer_count):
            outputs = min(self.maxf, startf * mul)

            has_first_conv = i != 0
            fused_scale = resolution * 2 >= 128

            block = DecodeBlock(inputs,
                                outputs,
                                latent_size,
                                has_first_conv,
                                fused_scale=fused_scale,
                                layer=i)

            resolution *= 2
            self.layer_to_resolution[i] = resolution

            self.style_sizes += [
                2 * (inputs if has_first_conv else outputs), 2 * outputs
            ]

            to_rgb.append(ToRGB(outputs, channels))

            #print("decode_block%d %s styles in: %dl out resolution: %d" % (
            #    (i + 1), millify(count_parameters(block)), outputs, resolution))
            self.decode_block.append(block)
            inputs = outputs
            mul //= 2

        self.to_rgb = to_rgb
Example #25
 def reset_parameters(self):
     for module in self.modules():
         if isinstance(module, nn.Conv2d) or isinstance(module, nn.Linear):
             init.xavier_normal_(module.weight)
             if module.bias is not None:
                 init.zeros_(module.bias)
         if isinstance(module, nn.BatchNorm2d):
             init.ones_(module.weight)
             init.zeros_(module.bias)
Example #26
def reset_bn(module: nn.BatchNorm2d):
    if module.track_running_stats:
        module.running_mean.zero_()
        module.running_var.fill_(1 - module.eps)
        # we do not reset the number of tracked batches here
        # self.num_batches_tracked.zero_()
    if module.affine:
        init.ones_(module.weight)
        init.zeros_(module.bias)
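Filling running_var with 1 - eps rather than 1 makes the eval-time denominator sqrt(running_var + eps) exactly one, so a freshly reset layer acts as the identity. A quick check, assuming the usual imports (torch, torch.nn as nn, torch.nn.init as init):

bn = nn.BatchNorm2d(8)
reset_bn(bn)
bn.eval()
x = torch.randn(2, 8, 4, 4)
# (x - 0) / sqrt((1 - eps) + eps) * 1 + 0 == x
assert torch.allclose(bn(x), x)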
Example #27
 def __init_weights(self):
     """Initialize the extra layers """
     for m in self._classifier.modules():
         if isinstance(m, nn.Linear):
             if self._num_output == m.out_features:
                 init.normal_(m.weight.data, mean=0.0, std=0.01)
                 init.zeros_(m.bias.data)
             else:
                 init.normal_(m.weight.data, mean=0.0, std=0.005)
                 init.ones_(m.bias.data)
Example #28
    def __init__(self, in_features, out_features, init_scale=1.0):
        super().__init__()
        self.in_features, self.out_features, self.init_scale = in_features, out_features, init_scale

        self.v = Parameter(torch.Tensor(out_features, in_features))
        self.g = Parameter(torch.Tensor(out_features))
        self.b = Parameter(torch.Tensor(out_features))

        init.normal_(self.v, 0., _WN_INIT_STDV)
        init.ones_(self.g)
        init.zeros_(self.b)
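These are the classic weight-normalisation parameters: a direction matrix v, a per-output gain g initialised to ones, and a bias b. A forward sketch under that interpretation, assuming import torch.nn.functional as F (the actual module may differ, e.g. it may rescale g with a data-dependent init):

    def forward(self, x):
        # Weight normalisation: w = g * v / ||v||, per output row.
        w = self.g[:, None] * self.v / self.v.norm(dim=1, keepdim=True)
        return F.linear(x, w, self.b)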
Example #29
    def __init__(self, num_features, use_bias=True):
        super(ScaleLayer, self).__init__()
        self.weight = Parameter(torch.Tensor(num_features))
        init.ones_(self.weight)
        self.num_features = num_features

        if use_bias:
            self.bias = Parameter(torch.Tensor(num_features))
            init.zeros_(self.bias)
        else:
            self.bias = None
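A per-feature scale like this is usually broadcast over the spatial dimensions. A forward sketch assuming NCHW input (hypothetical; the original forward is not shown):

    def forward(self, x):
        y = x * self.weight.view(1, -1, 1, 1)
        if self.bias is not None:
            y = y + self.bias.view(1, -1, 1, 1)
        return y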
Example #30
 def __init__(self, in_channels, compression=2, bn_args={'momentum': 0.01}):
     super().__init__()
     out_channels = in_channels // compression
     self.conv = nn.Conv2d(in_channels, out_channels, 1)
     self.bn = nn.BatchNorm2d(out_channels, **bn_args)
     self.activation = nn.ReLU6()
     self.pool = nn.AvgPool2d(2)
     # Parameter initialization
     init.kaiming_uniform_(self.conv.weight, mode='fan_in', nonlinearity='relu')
     init.zeros_(self.conv.bias)
     init.ones_(self.bn.weight)
     init.zeros_(self.bn.bias)
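This is a DenseNet-style transition layer: a 1x1 conv halves the channels (at the default compression=2) and average pooling halves the resolution. A forward sketch consistent with the layers above, where bn follows the conv because it is sized to the conv output:

 def forward(self, x):
     return self.pool(self.activation(self.bn(self.conv(x))))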