Example #1
 def init_weights(self):
     """
     Initialize weights.
     """
     for conv in [self.conv1, self.conv2, self.conv3]:
         init.xavier_uniform(conv.weight, gain=1)
         init.constant(conv.bias, 0.1)
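Note that these snippets use the old torch.nn.init names (xavier_uniform, constant, normal, kaiming_normal, orthogonal); current PyTorch deprecates them in favor of the in-place, underscore-suffixed variants. A minimal sketch of Example #1 rewritten against the current API (same layers as above):

 def init_weights(self):
     """
     Initialize weights with the in-place (underscore-suffixed) initializers.
     """
     for conv in [self.conv1, self.conv2, self.conv3]:
         init.xavier_uniform_(conv.weight, gain=1.0)
         init.constant_(conv.bias, 0.1)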
Example #2
  def __init__(
      self,
      last_conv_stride=1,
      last_conv_dilation=1,
      num_stripes=6,
      local_conv_out_channels=256,
      num_classes=0
  ):
    super(PCBModel, self).__init__()

    self.base = resnet50(
      pretrained=True,
      last_conv_stride=last_conv_stride,
      last_conv_dilation=last_conv_dilation)
    self.num_stripes = num_stripes

    self.local_conv_list = nn.ModuleList()
    for _ in range(num_stripes):
      self.local_conv_list.append(nn.Sequential(
        nn.Conv2d(2048, local_conv_out_channels, 1),
        nn.BatchNorm2d(local_conv_out_channels),
        nn.ReLU(inplace=True)
      ))

    if num_classes > 0:
      self.fc_list = nn.ModuleList()
      for _ in range(num_stripes):
        fc = nn.Linear(local_conv_out_channels, num_classes)
        init.normal(fc.weight, std=0.001)
        init.constant(fc.bias, 0)
        self.fc_list.append(fc)
Example #3
def weights_init_kaiming(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('Conv') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('Linear') != -1:
        init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
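Classname-dispatch initializers such as weights_init_kaiming above are meant to be passed to nn.Module.apply, which visits every submodule recursively; a minimal usage sketch (the toy Sequential model is only for illustration):

import torch.nn as nn

model = nn.Sequential(
    nn.Conv2d(3, 16, 3, padding=1),
    nn.BatchNorm2d(16),
    nn.ReLU(inplace=True),
)
model.apply(weights_init_kaiming)  # each submodule hits its matching branch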
Example #4
def weights_init_orthogonal(m):
    classname = m.__class__.__name__
    print(classname)
    if classname.find('Conv') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('Linear') != -1:
        init.orthogonal(m.weight.data, gain=1)
    elif classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #5
 def reset_parameters(self):
     """
     Initialize parameters following the way proposed in the paper.
     """
     init.orthogonal(self.weight_ih.data)
     weight_hh_data = torch.eye(self.hidden_size)
     weight_hh_data = weight_hh_data.repeat(1, 3)
     self.weight_hh.data.set_(weight_hh_data)
     # The bias is just set to zero vectors.
     if self.use_bias:
         init.constant(self.bias.data, val=0)
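The hidden-to-hidden identity initialization above (torch.eye repeated once per gate) is usually written today with copy_ under no_grad rather than Tensor.set_; a minimal sketch assuming a 3-gate, GRU-style cell as in the example (the hidden size is arbitrary):

import torch

hidden_size = 128  # assumed for illustration
weight_hh = torch.nn.Parameter(torch.empty(hidden_size, 3 * hidden_size))
with torch.no_grad():
    # one identity block per gate, matching weight_hh_data.repeat(1, 3) above
    weight_hh.copy_(torch.eye(hidden_size).repeat(1, 3))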
Example #6
    def __init__(self, depth, pretrained=True, cut_at_pooling=False,
                 num_features=0, norm=False, dropout=0, num_classes=0,
                 num_diff_features=0, iden_pretrain = False,
                 model_path='/media/hh/disc_d/hh/open-reid-master/pretrained model/resnet50.pth'):
        super(ResNet, self).__init__()

        self.depth = depth
        self.pretrained = pretrained
        self.cut_at_pooling = cut_at_pooling
        self.iden_pretrain = iden_pretrain

        # Construct base (pretrained) resnet
        if depth not in ResNet.__factory:
            raise KeyError("Unsupported depth:", depth)
        # self.base = ResNet.__factory[depth](pretrained=pretrained)
        self.base = baseresnet.ResNet(baseresnet.Bottleneck, [3, 4, 6, 3])
        if pretrained is True:
            self.base.load_state_dict(torch.load(model_path))
        self.relu = nn.ReLU(inplace=True)
        if not self.cut_at_pooling:
            self.num_features = num_features
            self.num_diff_features = num_diff_features
            self.norm = norm
            self.dropout = dropout
            self.has_embedding = num_features > 0
            self.num_classes = num_classes

            out_planes = self.base.fc.in_features

            # Append new layers
            if self.has_embedding:
                self.feat = nn.Linear(out_planes, self.num_features)
                self.feat_bn = nn.BatchNorm1d(self.num_features)
                init.kaiming_normal(self.feat.weight, mode='fan_out')
                init.constant(self.feat.bias, 0)
                init.constant(self.feat_bn.weight, 1)
                init.constant(self.feat_bn.bias, 0)
            else:
                # Change the num_features to CNN output channels
                self.num_features = out_planes
            if self.dropout > 0:
                self.drop = nn.Dropout(self.dropout)
            if self.num_diff_features > 0:
                self.diff_feat = nn.Linear(self.num_features, self.num_diff_features)
                init.orthogonal(self.diff_feat.weight)
                init.constant(self.diff_feat.bias, 0)
            if self.num_classes > 0:
                self.classifier = nn.Linear(self.num_features, self.num_classes)
                # init.orthogonal(self.classifier.weight)
                init.normal(self.classifier.weight, std=0.001)
                init.constant(self.classifier.bias, 0)

        if not self.pretrained:
            self.reset_params()
Example #7
  def __init__(self, local_conv_out_channels=128, num_classes=None):
    super(Model, self).__init__()
    self.base = resnet50(pretrained=True)
    planes = 2048
    self.local_conv = nn.Conv2d(planes, local_conv_out_channels, 1)
    self.local_bn = nn.BatchNorm2d(local_conv_out_channels)
    self.local_relu = nn.ReLU(inplace=True)

    if num_classes is not None:
      self.fc = nn.Linear(planes, num_classes)
      init.normal(self.fc.weight, std=0.001)
      init.constant(self.fc.bias, 0)
Example #8
    def __init__(self, num_features=0, norm=False, dropout=0,
                 num_diff_features=0):
        super(Trip_embedding, self).__init__()

        self.num_features = num_features
        self.num_diff_features = num_diff_features
        self.norm = norm
        self.dropout = dropout
        if self.dropout > 0:
            self.drop = nn.Dropout(self.dropout)
        if self.num_diff_features > 0:
            self.diff_feat = nn.Linear(self.num_features, self.num_diff_features)
            init.orthogonal(self.diff_feat.weight)
            init.constant(self.diff_feat.bias, 0)
Example #9
File: models.py Project: lysh/tsn-pytorch
    def _prepare_tsn(self, num_class):
        feature_dim = getattr(self.base_model, self.base_model.last_layer_name).in_features
        if self.dropout == 0:
            setattr(self.base_model, self.base_model.last_layer_name, nn.Linear(feature_dim, num_class))
            self.new_fc = None
        else:
            setattr(self.base_model, self.base_model.last_layer_name, nn.Dropout(p=self.dropout))
            self.new_fc = nn.Linear(feature_dim, num_class)

        std = 0.001
        if self.new_fc is None:
            normal(getattr(self.base_model, self.base_model.last_layer_name).weight, 0, std)
            constant(getattr(self.base_model, self.base_model.last_layer_name).bias, 0)
        else:
            normal(self.new_fc.weight, 0, std)
            constant(self.new_fc.bias, 0)
        return feature_dim
Example #10
File: utils.py Project: phonx/MUNIT
 def init_fun(m):
     classname = m.__class__.__name__
     if (classname.find('Conv') == 0 or classname.find('Linear') == 0) and hasattr(m, 'weight'):
         # print m.__class__.__name__
         if init_type == 'gaussian':
             init.normal(m.weight.data, 0.0, 0.02)
         elif init_type == 'xavier':
             init.xavier_normal(m.weight.data, gain=math.sqrt(2))
         elif init_type == 'kaiming':
             init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
         elif init_type == 'orthogonal':
             init.orthogonal(m.weight.data, gain=math.sqrt(2))
         elif init_type == 'default':
             pass
         else:
             assert 0, "Unsupported initialization: {}".format(init_type)
         if hasattr(m, 'bias') and m.bias is not None:
             init.constant(m.bias.data, 0.0)
Example #11
 def init_func(m):
     classname = m.__class__.__name__
     if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
         if init_type == 'normal':
             init.normal(m.weight.data, 0.0, gain)
         elif init_type == 'xavier':
             init.xavier_normal(m.weight.data, gain=gain)
         elif init_type == 'kaiming':
             init.kaiming_normal(m.weight.data, a=0, mode='fan_in')
         elif init_type == 'orthogonal':
             init.orthogonal(m.weight.data, gain=gain)
         else:
             raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
         if hasattr(m, 'bias') and m.bias is not None:
             init.constant(m.bias.data, 0.0)
     elif classname.find('BatchNorm2d') != -1:
         init.normal(m.weight.data, 1.0, gain)
         init.constant(m.bias.data, 0.0)
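Both init_fun (Example #10) and init_func above close over init_type and gain from an enclosing function and are then handed to apply(); a hedged reconstruction of that wrapper, written with the underscore API (the function name and defaults are assumptions, not the projects' exact code):

from torch import nn
from torch.nn import init

def init_weights(net, init_type='kaiming', gain=0.02):
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and ('Conv' in classname or 'Linear' in classname):
            if init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            else:
                init.normal_(m.weight.data, 0.0, gain)
            if getattr(m, 'bias', None) is not None:
                init.constant_(m.bias.data, 0.0)
        elif 'BatchNorm2d' in classname:
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)
    net.apply(init_func)  # init_type and gain are bound via the closure

# usage
init_weights(nn.Sequential(nn.Conv2d(3, 8, 3), nn.BatchNorm2d(8)), init_type='kaiming')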
Example #12
    def __init__(self, num_features=0):
        super(Reconstruct, self).__init__()
        self.num_features = num_features

        self.fc_re = nn.Linear(self.num_features, 2048)
        self.fc_rebn = nn.BatchNorm1d(2048)
        init.kaiming_normal(self.fc_re.weight, mode='fan_out')
        init.constant(self.fc_re.bias, 0)
        # init.constant(self.fc_rebn.weight, 1)
        # init.constant(self.fc_rebn.bias, 0)
        self.upconv5 = nn.ConvTranspose2d(2048, 1024, 3, 2, 1)
        self.upconv5_bn = nn.BatchNorm2d(1024)
        self.upconv4 = nn.ConvTranspose2d(1024, 512, 3, 2, 1)
        self.upconv4_bn = nn.BatchNorm2d(512)
        self.upconv3 = nn.ConvTranspose2d(512, 256, 3, 2, 1)
        self.upconv3_bn = nn.BatchNorm2d(256)
        self.upconv2 = nn.ConvTranspose2d(256, 64, 3, 1, 1)
        self.upconv2_bn = nn.BatchNorm2d(64)
        self.upconv1 = nn.ConvTranspose2d(64, 3, 7, 2, 3)
Example #13
 def reset_params(self):
     for m in self.modules():
         if isinstance(m, nn.Conv2d):
             init.kaiming_normal(m.weight, mode='fan_out')
             if m.bias is not None:
                 init.constant(m.bias, 0)
         elif isinstance(m, nn.BatchNorm2d):
             init.constant(m.weight, 1)
             init.constant(m.bias, 0)
         elif isinstance(m, nn.Linear):
             init.normal(m.weight, std=0.001)
             if m.bias is not None:
                 init.constant(m.bias, 0)
Example #14
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)
Example #15
def conv_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_uniform(m.weight, gain=np.sqrt(2))
        init.constant(m.bias, 0)
    elif classname.find('BatchNorm') != -1:
        init.constant(m.weight, 1)
        init.constant(m.bias, 0)
Example #16
    def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        # The input-to-hidden weight matrix is initialized orthogonally.
        init.orthogonal(self.weight_ih.data)
        # The hidden-to-hidden weight matrix is initialized as an identity
        # matrix.
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(4, 1)
        self.weight_hh.data.set_(weight_hh_data)
        # The bias is just set to zero vectors.
        init.constant(self.bias.data, val=0)
        # Initialization of BN parameters.
        self.bn_ih.reset_parameters()
        self.bn_hh.reset_parameters()
        self.bn_c.reset_parameters()
        self.bn_ih.bias.data.fill_(0)
        self.bn_hh.bias.data.fill_(0)
        self.bn_ih.weight.data.fill_(0.1)
        self.bn_hh.weight.data.fill_(0.1)
        self.bn_c.weight.data.fill_(0.1)
Example #17
    def __init__(self, depth, pretrained=True, cut_at_pooling=False,
                 num_features=0, norm=False, dropout=0, num_classes=0):
        super(ResNet, self).__init__()

        self.depth = depth
        self.pretrained = pretrained
        self.cut_at_pooling = cut_at_pooling

        # Construct base (pretrained) resnet
        if depth not in ResNet.__factory:
            raise KeyError("Unsupported depth:", depth)
        self.base = ResNet.__factory[depth](pretrained=pretrained)

        if not self.cut_at_pooling:
            self.num_features = num_features
            self.norm = norm
            self.dropout = dropout
            self.has_embedding = num_features > 0
            self.num_classes = num_classes

            out_planes = self.base.fc.in_features

            # Append new layers
            if self.has_embedding:
                self.feat = nn.Linear(out_planes, self.num_features)
                self.feat_bn = nn.BatchNorm1d(self.num_features)
                init.kaiming_normal(self.feat.weight, mode='fan_out')
                init.constant(self.feat.bias, 0)
                init.constant(self.feat_bn.weight, 1)
                init.constant(self.feat_bn.bias, 0)
            else:
                # Change the num_features to CNN output channels
                self.num_features = out_planes
            if self.dropout > 0:
                self.drop = nn.Dropout(self.dropout)
            if self.num_classes > 0:
                self.classifier = nn.Linear(self.num_features, self.num_classes)
                init.normal(self.classifier.weight, std=0.001)
                init.constant(self.classifier.bias, 0)

        if not self.pretrained:
            self.reset_params()
Example #18
 def __init__(self, in_size, out_size, kernel_size=3,stride=1, padding=1, activation=nn.ReLU(), space_dropout=False):
     super(UNetUpBlock, self).__init__()
     self.conv0 = nn.Conv2d(in_size, out_size, 3, stride=1, padding=1)
     self.conv = nn.Conv2d(in_size, out_size, kernel_size, stride=1, padding=1)
     self.conv2 = nn.Conv2d(out_size, out_size, kernel_size,stride=1, padding=1)
     init.xavier_normal(self.conv0.weight,gain=np.sqrt(2))
     init.xavier_normal(self.conv.weight,gain=np.sqrt(2))
     init.xavier_normal(self.conv2.weight,gain=np.sqrt(2))
     init.constant(self.conv0.bias, 0.1)
     init.constant(self.conv.bias, 0.1)
     init.constant(self.conv2.bias, 0.1)
     self.activation = activation
     self.upsampler = nn.Upsample(scale_factor=2)
Example #19
    def __init__(self, in_size, out_size, kernel_size=3, stride=1, padding=1, activation = nn.ReLU(), downsample=True):
        super(UNetConvBlock, self).__init__()
        self.conv_down = nn.Conv2d(in_size, in_size, kernel_size, stride=2, padding=1)
        self.conv = nn.Conv2d(in_size, out_size, kernel_size, stride=1, padding=padding)
        self.conv2 = nn.Conv2d(out_size, out_size, kernel_size,stride=1, padding=1)
        init.xavier_normal(self.conv_down.weight,gain=np.sqrt(2))
        init.xavier_normal(self.conv.weight,gain=np.sqrt(2))
        init.xavier_normal(self.conv2.weight,gain=np.sqrt(2))
        init.constant(self.conv_down.bias,0.1)
        init.constant(self.conv.bias, 0.1)
        init.constant(self.conv2.bias, 0.1)

        self.activation = activation
        self.downsample = downsample
Example #20
def init_params(net):
    '''Init layer parameters.'''
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            init.kaiming_normal(m.weight, mode='fan_out')
            if m.bias is not None:
                init.constant(m.bias, 0)
        elif isinstance(m, nn.BatchNorm2d):
            init.constant(m.weight, 1)
            init.constant(m.bias, 0)
        elif isinstance(m, nn.Linear):
            init.normal(m.weight, std=1e-3)
            if m.bias is not None:
                init.constant(m.bias, 0)


Example #21
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(in_channels=1,
                               out_channels=8,
                               kernel_size=(3, 3),
                               padding=(2, 2),
                               dilation=2)
        self.conv2 = nn.Conv2d(in_channels=8,
                               out_channels=8,
                               kernel_size=(3, 3),
                               padding=(2, 2),
                               dilation=2)
        self.conv3 = nn.Conv2d(in_channels=8,
                               out_channels=16,
                               kernel_size=(3, 3),
                               padding=(2, 2),
                               dilation=2)
        # self.conv_attention1 = Attention_linear(16, 64)
        self.conv_attention1 = Attention_nonlinear(16, 64, 64)
        self.conv4 = nn.Conv2d(in_channels=16,
                               out_channels=16,
                               kernel_size=(3, 3),
                               padding=(2, 2),
                               dilation=2)
        self.conv5 = nn.Conv2d(in_channels=16,
                               out_channels=32,
                               kernel_size=(3, 3),
                               padding=(2, 2),
                               dilation=2)
        # self.conv_attention2 = Attention_linear(32, 64)
        self.conv_attention2 = Attention_nonlinear(32, 64, 64)

        self.conv6 = nn.Conv2d(in_channels=32,
                               out_channels=32,
                               kernel_size=(3, 3),
                               padding=(2, 2),
                               dilation=2)
        # self.conv_attention3 = Attention_linear(32, 64)
        self.conv_attention3 = Attention_nonlinear(32, 64, 64)

        self.conv7 = nn.Conv2d(in_channels=32,
                               out_channels=64,
                               kernel_size=(3, 3),
                               padding=(2, 2),
                               dilation=2)
        self.conv5_drop = nn.Dropout(p=.2)
        self.fc1 = nn.Linear(in_features=80, out_features=32)
        self.fc2 = nn.Linear(in_features=32, out_features=10)
        self.fc3 = nn.Linear(in_features=10, out_features=2)

        for m in self.modules():

            if isinstance(m, nn.Conv2d) or isinstance(m, nn.Linear):
                #init.xavier_normal(m.weight.data, gain=nn.init.calculate_gain('relu'))
                init.xavier_uniform(m.weight.data,
                                    gain=nn.init.calculate_gain('relu'))
                #init.kaiming_uniform(m.weight.data)
                init.constant(m.bias, .1)

            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
Example #22
def weights_init(net):
    for m in net.modules():
        if isinstance(m, torch.nn.Conv3d) or isinstance(
                m, torch.nn.ConvTranspose3d):
            init.kaiming_normal(m.weight)
            init.constant(m.bias, 0.01)
Example #23
 def _layer_init(self, layer):
     init.xavier_uniform(layer.weight, gain=1)
     init.constant(layer.bias, val=0)
Example #24
 def _init_weights(self):
     if cfg.MRCNN.CONV_INIT == 'GaussianFill':
         init.normal(self.upconv5.weight, std=0.001)
     elif cfg.MRCNN.CONV_INIT == 'MSRAFill':
         init.kaiming_normal(self.upconv5.weight)
     init.constant(self.upconv5.bias, 0)
Example #25
 def reset_parameters(self):
     init.kaiming_normal(self.attend[0].weight.data)
     init.constant(self.attend[0].bias.data, val=0)
Example #26
def _init_weight(modules):
    for m in modules:
        if isinstance(m, (nn.Conv2d, nn.Conv3d)):
            init.xavier_uniform(m.weight.data)
            if m.bias is not None:
                init.constant(m.bias.data, 0)
Example #27
File: BMN.py Project: yinanhe/ForgeryNet
 def weight_init(m):
     if isinstance(m, nn.Conv2d):
         init.xavier_uniform_(m.weight)
         #init.xavier_normal(m.weight)
         init.constant(m.bias, 0)
Example #28
def weights_init(m):
    if isinstance(m, nn.Conv2d):
        init.xavier_uniform(m.weight, gain=numpy.sqrt(2.0))
        init.constant(m.bias, 0.1)
Example #29
def weights_init_bn(m):
    classname = m.__class__.__name__
    # print(classname)
    if classname.find('BatchNorm2d') != -1:
        init.normal(m.weight.data, 1.0, 0.02)
        init.constant(m.bias.data, 0.0)
Example #30
    def __init__(self,
                 channels=3,
                 dimension=35,
                 activation=nn.ReLU(),
                 pretrained=True):
        super(SqueezeSimplePredictor, self).__init__()
        self.activation = activation
        self.dimension = dimension
        first_norm_layer = nn.BatchNorm2d(96)
        final_norm_layer = nn.BatchNorm2d(dimension)
        self.conv1 = nn.Conv2d(channels, 96, kernel_size=7, stride=2)
        self.norm1 = first_norm_layer
        self.downsample1 = nn.MaxPool2d(kernel_size=3, stride=2)
        self.fire1 = FireConvNorm(96, 16, 64, 64, activation=activation)
        self.fire2 = FireConvNorm(128, 16, 64, 64, activation=activation)
        self.fire3 = FireConvNorm(128, 32, 128, 128, activation=activation)
        self.downsample2 = nn.MaxPool2d(kernel_size=3, stride=2)
        self.fire4 = FireConvNorm(256, 32, 128, 128, activation=activation)
        self.fire5 = FireConvNorm(256, 48, 192, 192, activation=activation)
        self.fire6 = FireConvNorm(384, 48, 192, 192, activation=activation)
        self.fire7 = FireConvNorm(384, 64, 256, 256, activation=activation)
        self.downsample3 = nn.MaxPool2d(kernel_size=3, stride=2)
        self.fire8 = FireConvNorm(512, 64, 256, 256, activation=activation)
        if pretrained:
            model = models.squeezenet1_0(pretrained=True).features
            if channels == 3:
                self.conv1 = model[0]

            self.fire1.squeeze = model[3].squeeze
            self.fire1.expand1x1 = model[3].expand1x1
            self.fire1.expand3x3 = model[3].expand3x3
            self.fire1.ConfigureNorm()

            self.fire2.squeeze = model[4].squeeze
            self.fire2.expand1x1 = model[4].expand1x1
            self.fire2.expand3x3 = model[4].expand3x3
            self.fire2.ConfigureNorm()

            self.fire3.squeeze = model[5].squeeze
            self.fire3.expand1x1 = model[5].expand1x1
            self.fire3.expand3x3 = model[5].expand3x3
            self.fire3.ConfigureNorm()

            self.fire4.squeeze = model[7].squeeze
            self.fire4.expand1x1 = model[7].expand1x1
            self.fire4.expand3x3 = model[7].expand3x3
            self.fire4.ConfigureNorm()

            self.fire5.squeeze = model[8].squeeze
            self.fire5.expand1x1 = model[8].expand1x1
            self.fire5.expand3x3 = model[8].expand3x3
            self.fire5.ConfigureNorm()

            self.fire6.squeeze = model[9].squeeze
            self.fire6.expand1x1 = model[9].expand1x1
            self.fire6.expand3x3 = model[9].expand3x3
            self.fire6.ConfigureNorm()

            self.fire7.squeeze = model[10].squeeze
            self.fire7.expand1x1 = model[10].expand1x1
            self.fire7.expand3x3 = model[10].expand3x3
            self.fire7.ConfigureNorm()

            self.fire8.squeeze = model[12].squeeze
            self.fire8.expand1x1 = model[12].expand1x1
            self.fire8.expand3x3 = model[12].expand3x3
            self.fire8.ConfigureNorm()

        else:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    init.kaiming_uniform(m.weight)
                    if m.bias is not None:
                        init.constant(m.bias, 0)

        self.predictor = nn.Sequential(
            nn.Dropout(p=0),
            nn.Conv2d(LATENT_DIM, dimension, kernel_size=1),
            final_norm_layer,
            activation,
            nn.AvgPool2d(kernel_size=12, stride=1),
        )
Example #31
    def create_network(self):
        if self.stride_before_pool:
            conv_stride = self.pool_time_stride
            pool_stride = 1
        else:
            conv_stride = 1
            pool_stride = self.pool_time_stride
        pool_class_dict = dict(max=nn.MaxPool2d, mean=AvgPool2dWithConv)
        first_pool_class = pool_class_dict[self.first_pool_mode]
        later_pool_class = pool_class_dict[self.later_pool_mode]
        model = nn.Sequential()
        if self.split_first_layer:
            model.add_module('dimshuffle', Expression(_transpose_time_to_spat))
            model.add_module(
                'conv_time',
                nn.Conv2d(
                    1,
                    self.n_filters_time,
                    (self.filter_time_length, 1),
                    stride=1,
                ))
            model.add_module(
                'conv_spat',
                nn.Conv2d(self.n_filters_time,
                          self.n_filters_spat, (1, self.in_chans),
                          stride=(conv_stride, 1),
                          bias=not self.batch_norm))
            n_filters_conv = self.n_filters_spat
        else:
            model.add_module(
                'conv_time',
                nn.Conv2d(self.in_chans,
                          self.n_filters_time, (self.filter_time_length, 1),
                          stride=(conv_stride, 1),
                          bias=not self.batch_norm))
            n_filters_conv = self.n_filters_time
        if self.batch_norm:
            model.add_module(
                'bnorm',
                nn.BatchNorm2d(n_filters_conv,
                               momentum=self.batch_norm_alpha,
                               affine=True,
                               eps=1e-5),
            )
        model.add_module('conv_nonlin', Expression(self.first_nonlin))
        model.add_module(
            'pool',
            first_pool_class(kernel_size=(self.pool_time_length, 1),
                             stride=(pool_stride, 1)))
        model.add_module('pool_nonlin', Expression(self.first_pool_nonlin))

        def add_conv_pool_block(model, n_filters_before, n_filters,
                                filter_length, block_nr):
            suffix = '_{:d}'.format(block_nr)
            model.add_module('drop' + suffix, nn.Dropout(p=self.drop_prob))
            model.add_module(
                'conv' + suffix.format(block_nr),
                nn.Conv2d(n_filters_before,
                          n_filters, (filter_length, 1),
                          stride=(conv_stride, 1),
                          bias=not self.batch_norm))
            if self.batch_norm:
                model.add_module(
                    'bnorm' + suffix,
                    nn.BatchNorm2d(n_filters,
                                   momentum=self.batch_norm_alpha,
                                   affine=True,
                                   eps=1e-5))
            model.add_module('nonlin' + suffix, Expression(self.later_nonlin))

            model.add_module(
                'pool' + suffix,
                later_pool_class(kernel_size=(self.pool_time_length, 1),
                                 stride=(pool_stride, 1)))
            model.add_module('pool_nonlin' + suffix,
                             Expression(self.later_pool_nonlin))

        add_conv_pool_block(model, n_filters_conv, self.n_filters_2,
                            self.filter_length_2, 2)
        add_conv_pool_block(model, self.n_filters_2, self.n_filters_3,
                            self.filter_length_3, 3)
        add_conv_pool_block(model, self.n_filters_3, self.n_filters_4,
                            self.filter_length_4, 4)

        model.eval()
        if self.final_conv_length == 'auto':
            out = model(
                np_to_var(
                    np.ones((1, self.in_chans, self.input_time_length, 1),
                            dtype=np.float32)))
            n_out_time = out.cpu().data.numpy().shape[2]
            self.final_conv_length = n_out_time
        model.add_module(
            'conv_classifier',
            nn.Conv2d(self.n_filters_4,
                      self.n_classes, (self.final_conv_length, 1),
                      bias=True))
        model.add_module('softmax', nn.LogSoftmax())
        model.add_module('squeeze', Expression(_squeeze_final_output))

        # Initialization, xavier is same as in our paper...
        # was default from lasagne
        init.xavier_uniform(model.conv_time.weight, gain=1)
        # maybe no bias in case of no split layer and batch norm
        if self.split_first_layer or (not self.batch_norm):
            init.constant(model.conv_time.bias, 0)
        if self.split_first_layer:
            init.xavier_uniform(model.conv_spat.weight, gain=1)
            if not self.batch_norm:
                init.constant(model.conv_spat.bias, 0)
        if self.batch_norm:
            init.constant(model.bnorm.weight, 1)
            init.constant(model.bnorm.bias, 0)
        param_dict = dict(list(model.named_parameters()))
        for block_nr in range(2, 5):
            conv_weight = param_dict['conv_{:d}.weight'.format(block_nr)]
            init.xavier_uniform(conv_weight, gain=1)
            if not self.batch_norm:
                conv_bias = param_dict['conv_{:d}.bias'.format(block_nr)]
                init.constant(conv_bias, 0)
            else:
                bnorm_weight = param_dict['bnorm_{:d}.weight'.format(block_nr)]
                bnorm_bias = param_dict['bnorm_{:d}.bias'.format(block_nr)]
                init.constant(bnorm_weight, 1)
                init.constant(bnorm_bias, 0)

        init.xavier_uniform(model.conv_classifier.weight, gain=1)
        init.constant(model.conv_classifier.bias, 0)

        # Start in eval mode
        model.eval()
        return model
Example #32
    def __init__(self,
                 depth,
                 pretrained=True,
                 cut_at_pooling=False,
                 num_features=0,
                 norm=False,
                 dropout=0,
                 num_classes=0,
                 FCN=False,
                 radius=1.,
                 thresh=0.5):
        super(ResNet, self).__init__()

        self.depth = depth
        self.pretrained = pretrained
        self.cut_at_pooling = cut_at_pooling
        self.FCN = FCN

        # Construct base (pretrained) resnet
        if depth not in ResNet.__factory:
            raise KeyError("Unsupported depth:", depth)
        self.base = ResNet.__factory[depth](pretrained=pretrained)

        #==========================add dilation=============================#
        if self.FCN:
            for mo in self.base.layer4[0].modules():
                if isinstance(mo, nn.Conv2d):
                    mo.stride = (1, 1)
#================append conv for FCN==============================#
            self.num_features = num_features
            self.num_classes = 702  #num_classes
            self.dropout = dropout
            out_planes = self.base.fc.in_features
            self.local_conv = nn.Conv2d(out_planes,
                                        self.num_features,
                                        kernel_size=1,
                                        padding=0,
                                        bias=False)
            init.kaiming_normal(self.local_conv.weight, mode='fan_out')
            #            init.constant(self.local_conv.bias,0)
            self.feat_bn2d = nn.BatchNorm2d(
                self.num_features)  #may not be used, not working on caffe
            init.constant(self.feat_bn2d.weight,
                          1)  #initialize BN, may not be used
            init.constant(self.feat_bn2d.bias,
                          0)  # iniitialize BN, may not be used

            # self.offset = ConvOffset2D(256)

            ##---------------------------stripe1----------------------------------------------#
            self.instance0 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance0.weight, std=0.001)
            init.constant(self.instance0.bias, 0)
            ##---------------------------stripe1----------------------------------------------#
            ##---------------------------stripe1----------------------------------------------#
            self.instance1 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance1.weight, std=0.001)
            init.constant(self.instance1.bias, 0)
            ##---------------------------stripe1----------------------------------------------#
            ##---------------------------stripe1----------------------------------------------#
            self.instance2 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance2.weight, std=0.001)
            init.constant(self.instance2.bias, 0)
            ##---------------------------stripe1----------------------------------------------#
            ##---------------------------stripe1----------------------------------------------#
            self.instance3 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance3.weight, std=0.001)
            init.constant(self.instance3.bias, 0)
            ##---------------------------stripe1----------------------------------------------#
            ##---------------------------stripe1----------------------------------------------#
            self.instance4 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance4.weight, std=0.001)
            init.constant(self.instance4.bias, 0)
            ##---------------------------stripe1----------------------------------------------#
            ##---------------------------stripe1----------------------------------------------#
            self.instance5 = nn.Linear(self.num_features, self.num_classes)
            init.normal(self.instance5.weight, std=0.001)
            init.constant(self.instance5.bias, 0)

            self.drop = nn.Dropout(self.dropout)

        elif not self.cut_at_pooling:
            self.num_features = num_features
            self.norm = norm
            self.dropout = dropout
            self.has_embedding = num_features > 0
            self.num_classes = num_classes

            self.radius = nn.Parameter(torch.FloatTensor([radius]))
            self.thresh = nn.Parameter(torch.FloatTensor([thresh]))

            out_planes = self.base.fc.in_features

            # Append new layers
            if self.has_embedding:
                self.feat = nn.Linear(out_planes,
                                      self.num_features,
                                      bias=False)
                self.feat_bn = nn.BatchNorm1d(self.num_features)
                init.kaiming_normal(self.feat.weight, mode='fan_out')
            else:
                # Change the num_features to CNN output channels
                self.num_features = out_planes
            if self.dropout > 0:
                self.drop = nn.Dropout(self.dropout)
            if self.num_classes > 0:
                self.classifier = nn.Linear(self.num_features,
                                            self.num_classes,
                                            bias=True)
                init.normal(self.classifier.weight, std=0.001)
                init.constant(self.classifier.bias, 0)

        if not self.pretrained:
            self.reset_params()
Example #33
 def _set_init(self, layer):
     init.normal(layer.weight, mean=0., std=.1)
     init.constant(layer.bias, 0.5)
Example #34
 def __init__(self, n_channels, init_scale):
     super(L2NormScale, self).__init__()
     self.n_channels = n_channels
     self.init_scale = init_scale
     self.weight = nn.Parameter(torch.Tensor(self.n_channels))
     init.constant(self.weight, self.init_scale)
Example #35
 def weight_init(m):
     if isinstance(m, nn.Conv2d):
         init.xavier_normal(m.weight)
         init.constant(m.bias, 0)
Example #36
    def reset_parameters(self):
        init.kaiming_normal(self.encoder1[0].weight.data)
        init.kaiming_normal(self.encoder1[2].weight.data)
        init.constant(self.encoder1[0].bias.data, val=0)
        init.constant(self.encoder1[2].bias.data, val=0)

        init.kaiming_normal(self.encoder2[0].weight.data)
        init.kaiming_normal(self.encoder2[2].weight.data)
        init.constant(self.encoder2[0].bias.data, val=0)
        init.constant(self.encoder2[2].bias.data, val=0)

        init.kaiming_normal(self.encoder3[0].weight.data)
        init.kaiming_normal(self.encoder3[2].weight.data)
        init.constant(self.encoder3[0].bias.data, val=0)
        init.constant(self.encoder3[2].bias.data, val=0)

        init.kaiming_normal(self.encoder4[0].weight.data)
        init.constant(self.encoder4[0].bias.data, val=0)

        init.kaiming_normal(self.decoder1[0].weight.data)
        init.kaiming_normal(self.decoder1[2].weight.data)
        init.constant(self.decoder1[0].bias.data, val=0)
        init.constant(self.decoder1[2].bias.data, val=0)

        init.kaiming_normal(self.decoder2[0].weight.data)
        init.kaiming_normal(self.decoder2[2].weight.data)
        init.constant(self.decoder2[0].bias.data, val=0)
        init.constant(self.decoder2[2].bias.data, val=0)

        init.kaiming_normal(self.decoder3[0].weight.data)
        init.kaiming_normal(self.decoder3[2].weight.data)
        init.constant(self.decoder3[0].bias.data, val=0)
        init.constant(self.decoder3[2].bias.data, val=0)

        init.kaiming_normal(self.decoder4[0].weight.data)
        init.constant(self.decoder4[0].bias.data, val=0)
Example #37
 def _set_init(self, layer):
     init.normal(layer.weight, mean=0., std=.1)
     init.constant(layer.bias, B_INIT)
Example #38
def resnet50(num_classes=751, **kwargs):
    pretrained = True
    model = ResNet(Bottleneck,
                   layers=[3, 4, 6, 3],
                   num_classes=num_classes,
                   **kwargs)

    if pretrained:
        # if False:
        print(
            '***************************wgc will succeed! load model!********************************8'
        )
        updated_params = model_zoo.load_url(model_urls['resnet50'])
        updated_params.pop('fc.weight')
        updated_params.pop('fc.bias')
        # print('updated_params:',updated_params)
        # print('wgcwgcwgc:',type(updated_params))
        new_params = model.state_dict()
        new_params.update(updated_params)
        model.load_state_dict(new_params)
    # else:
    if False:
        print(
            '***************************wgc will succeed! no pretrained********************************8'
        )
        for m in model.modules():
            if isinstance(m, nn.Conv2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant(m.weight, 1)
                init.constant(m.bias, 0)
            elif isinstance(m, nn.Linear):
                init.normal(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant(m.bias, 0)

        init.kaiming_normal(model.fc1.weight, mode='fan_out')
        init.constant(model.fc1.bias, 0)
        init.constant(model.bn2.weight, 1)
        init.constant(model.bn2.bias, 0)

        init.normal(model.fc2.weight, std=0.001)
        init.constant(model.fc2.bias, 0)
    return model
Example #39
 def _set_init(self, layer):  # parameter initialization
     init.normal(layer.weight, mean=0., std=.1)
     init.constant(layer.bias, B_INIT)
Example #40
 def reset_parameters(self):
     init.constant(self.weight, self.gamma)
Example #41
    def _construct(self, layer_config):
        """
        Method to construct the layer from the layer_config dictionary parameters.
        """
        self.linear = nn.Linear(layer_config['n_in'], layer_config['n_out'])
        self.bn = lambda x: x
        if 'batch_norm' in layer_config:
            if layer_config['batch_norm']:
                self.bn = nn.BatchNorm1d(layer_config['n_out'], momentum=0.99)
        if 'weight_norm' in layer_config:
            if layer_config['weight_norm']:
                self.linear = nn.utils.weight_norm(self.linear, name='weight')

        init_gain = 1.
        if 'non_linearity' in layer_config:
            non_linearity = layer_config['non_linearity']
            if non_linearity is None or non_linearity == 'linear':
                self.non_linearity = lambda x: x
            elif non_linearity == 'relu':
                self.non_linearity = nn.ReLU()
                init_gain = init.calculate_gain('relu')
            elif non_linearity == 'leaky_relu':
                self.non_linearity = nn.LeakyReLU()
            elif non_linearity == 'clipped_leaky_relu':
                self.non_linearity = ClippedLeakyReLU(negative_slope=1. / 3,
                                                      clip_min=-3,
                                                      clip_max=3)
            elif non_linearity == 'elu':
                self.non_linearity = nn.ELU()
            elif non_linearity == 'selu':
                self.non_linearity = nn.SELU()
            elif non_linearity == 'tanh':
                self.non_linearity = nn.Tanh()
                init_gain = init.calculate_gain('tanh')
            elif non_linearity == 'sigmoid':
                self.non_linearity = nn.Sigmoid()
            else:
                raise Exception('Non-linearity ' + str(non_linearity) +
                                ' not found.')
        else:
            self.non_linearity = lambda x: x

        self.dropout = lambda x: x
        if 'dropout' in layer_config:
            if layer_config['dropout'] is not None:
                self.dropout = nn.Dropout(layer_config['dropout'])

        if 'initialize' in layer_config:
            initialize = layer_config['initialize']
            if initialize == 'normal':
                init.normal(self.linear.weight)
            elif initialize == 'glorot_uniform':
                init.xavier_uniform(self.linear.weight, gain=init_gain)
            elif initialize == 'glorot_normal':
                init.xavier_normal(self.linear.weight, gain=init_gain)
            elif initialize == 'kaiming_uniform':
                init.kaiming_uniform(self.linear.weight)
            elif initialize == 'kaiming_normal':
                init.kaiming_normal(self.linear.weight)
            elif initialize == 'orthogonal':
                init.orthogonal(self.linear.weight, gain=init_gain)
            elif initialize == '':
                pass
            else:
                raise Exception('Parameter initialization ' + str(initialize) +
                                ' not found.')

            if 'batch_norm' in layer_config:
                if layer_config['batch_norm']:
                    init.normal(self.bn.weight, 1, 0.02)
                    init.constant(self.bn.bias, 0.)
        else:
            init.xavier_normal(self.linear.weight, gain=init_gain)

        init.constant(self.linear.bias, 0.)
Example #42
    def __init__(self,
                 max_num_nodes,
                 input_dim,
                 hidden_dim,
                 embedding_dim,
                 label_dim,
                 num_layers,
                 assign_hidden_dim,
                 assign_ratio=0.25,
                 assign_num_layers=-1,
                 num_pooling=1,
                 pred_hidden_dims=[50],
                 concat=True,
                 bn=True,
                 dropout=0.0,
                 linkpred=True,
                 assign_input_dim=-1,
                 args=None):
        '''
        Args:
            num_layers: number of gc layers before each pooling
            num_nodes: number of nodes for each graph in batch
            linkpred: flag to turn on link prediction side objective
        '''

        super(SoftPoolingGcnEncoder,
              self).__init__(input_dim,
                             hidden_dim,
                             embedding_dim,
                             label_dim,
                             num_layers,
                             pred_hidden_dims=pred_hidden_dims,
                             concat=concat,
                             args=args)
        add_self = not concat
        self.num_pooling = num_pooling
        self.linkpred = linkpred

        # GC
        self.conv_first_after_pool = []
        self.conv_block_after_pool = []
        self.conv_last_after_pool = []
        for i in range(num_pooling):
            # use self to register the modules in self.modules()
            self.conv_first2, self.conv_block2, self.conv_last2 = self.build_conv_layers(
                self.pred_input_dim,
                hidden_dim,
                embedding_dim,
                num_layers,
                add_self,
                normalize=True,
                dropout=dropout)
            self.conv_first_after_pool.append(self.conv_first2)
            self.conv_block_after_pool.append(self.conv_block2)
            self.conv_last_after_pool.append(self.conv_last2)

        # assignment
        if assign_num_layers == -1:
            assign_num_layers = num_layers
        if assign_input_dim == -1:
            assign_input_dim = input_dim
        assign_dim = int(max_num_nodes * assign_ratio)
        self.assign_conv_first, self.assign_conv_block, self.assign_conv_last = self.build_conv_layers(
            assign_input_dim,
            assign_hidden_dim,
            assign_dim,
            assign_num_layers,
            add_self,
            normalize=True)
        assign_pred_input_dim = assign_hidden_dim * (
            num_layers - 1) + assign_dim if concat else assign_dim
        self.assign_pred = self.build_pred_layers(assign_pred_input_dim, [],
                                                  assign_dim,
                                                  num_aggs=1)

        self.pred_model = self.build_pred_layers(self.pred_input_dim *
                                                 (num_pooling + 1),
                                                 pred_hidden_dims,
                                                 label_dim,
                                                 num_aggs=self.num_aggs)

        for m in self.modules():
            if isinstance(m, GraphConv):
                m.weight.data = init.xavier_uniform(
                    m.weight.data, gain=nn.init.calculate_gain('relu'))
                if m.bias is not None:
                    m.bias.data = init.constant(m.bias.data, 0.0)
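The assignments above (m.weight.data = init.xavier_uniform(...)) work because the old initializers return the tensor they just filled; with the in-place API the assignment is unnecessary. A hedged, drop-in equivalent of that loop:

        for m in self.modules():
            if isinstance(m, GraphConv):
                init.xavier_uniform_(m.weight, gain=nn.init.calculate_gain('relu'))
                if m.bias is not None:
                    init.constant_(m.bias, 0.0)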
Example #43
    def __init__(self,
                 growth_rate=32,
                 block_config=(6, 12, 24, 16),
                 num_init_features=64,
                 bn_size=4,
                 drop_rate=0,
                 hidden_num=2048,
                 cut_at_pooling=False,
                 num_features=0,
                 norm=False,
                 dropout=0,
                 num_classes=0):

        super(DenseNet, self).__init__()
        self.cut_at_pooling = cut_at_pooling
        # First convolution
        self.features = nn.Sequential(
            OrderedDict([
                ('conv0',
                 nn.Conv2d(3,
                           num_init_features,
                           kernel_size=7,
                           stride=2,
                           padding=3,
                           bias=False)),
                ('norm0', nn.BatchNorm2d(num_init_features)),
                ('relu0', nn.ReLU(inplace=True)),
                ('pool0', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
            ]))

        # Each denseblock
        num_features = num_init_features
        for i, num_layers in enumerate(block_config):
            if i < 2:
                block = _DenseBlock(num_layers=num_layers,
                                    num_input_features=num_features,
                                    bn_size=bn_size,
                                    growth_rate=growth_rate,
                                    drop_rate=drop_rate)
                self.features.add_module('denseblock%d' % (i + 1), block)
                num_features = num_features + num_layers * growth_rate
                if i != len(block_config) - 1:
                    trans = _Transition(num_input_features=num_features,
                                        num_output_features=num_features // 2)
                    self.features.add_module('transition%d' % (i + 1), trans)
                    num_features = num_features // 2
        self.hidden = nn.Sequential(
            OrderedDict([
                ('norm2', nn.BatchNorm2d(num_features)),
                ('relu2', nn.ReLU(inplace=True)),
                ('conv3',
                 nn.Conv2d(num_features,
                           num_features,
                           kernel_size=3,
                           stride=1,
                           padding=1,
                           bias=False)),
                ('norm3', nn.BatchNorm2d(num_features)),
                ('relu3', nn.ReLU(inplace=True)),
                ('pool3', nn.MaxPool2d(kernel_size=2, stride=2, padding=1)),
                ('conv4',
                 nn.Conv2d(num_features,
                           num_features,
                           kernel_size=3,
                           stride=1,
                           padding=1,
                           bias=False)),
                ('norm4', nn.BatchNorm2d(num_features)),
                ('relu4', nn.ReLU(inplace=True)),
                ('pool4', nn.MaxPool2d(kernel_size=2, stride=2, padding=1)),
            ]))
        self.embedding = nn.Sequential(
            OrderedDict([('feat', nn.Linear(3 * 6 * 512, hidden_num)),
                         ('feat_bn', nn.BatchNorm2d(hidden_num)),
                         ('feat_relu', nn.ReLU(inplace=True))]))
        if not self.cut_at_pooling:
            self.num_features = num_features
            self.norm = norm
            self.dropout = dropout
            self.has_embedding = num_features > 0
            self.num_classes = num_classes

            out_planes = self.base.classifier.in_features

            # Append new layers
            if self.has_embedding:
                self.feat = nn.Linear(out_planes, self.num_features)
                self.feat_bn = nn.BatchNorm1d(self.num_features)
                init.kaiming_normal(self.feat.weight, mode='fan_out')
                init.constant(self.feat.bias, 0)
                init.constant(self.feat_bn.weight, 1)
                init.constant(self.feat_bn.bias, 0)
            else:
                # Change the num_features to CNN output channels
                self.num_features = out_planes
            if self.dropout > 0:
                self.drop = nn.Dropout(self.dropout)
            if self.num_classes > 0:
                self.classifier_bn = nn.BatchNorm1d(self.num_features)
                self.classifier = nn.Linear(self.num_features,
                                            self.num_classes)
                init.constant(self.classifier_bn.weight, 1)
                init.constant(self.classifier_bn.bias, 0)
                init.normal(self.classifier.weight, std=0.001)
                init.constant(self.classifier.bias, 0)
Example #44
def weights_init_classifier(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        init.normal(m.weight.data, std=0.001)
        init.constant(m.bias.data, 0.0)
Example #45
    def __init__(self, dim, output_padding, numpen, slope=0.0):
        super(extractconvSDAE, self).__init__()
        self.in_dim = dim[0]
        self.nlayers = len(dim) - 1
        self.reluslope = slope
        self.numpen = numpen
        self.enc, self.dec = [], []
        self.benc, self.bdec = [], []
        for i in range(self.nlayers):
            if i == self.nlayers - 1:
                self.enc.append(nn.Linear(dim[i] * numpen, dim[i + 1]))
                self.benc.append(nn.BatchNorm2d(dim[i + 1]))
                self.dec.append(
                    nn.ConvTranspose2d(dim[i + 1],
                                       dim[i],
                                       kernel_size=numpen,
                                       stride=1))
                self.bdec.append(nn.BatchNorm2d(dim[i]))
            elif i == 0:
                self.enc.append(
                    nn.Conv2d(dim[i],
                              dim[i + 1],
                              kernel_size=4,
                              stride=2,
                              padding=1))
                self.benc.append(nn.BatchNorm2d(dim[i + 1]))
                self.dec.append(
                    nn.ConvTranspose2d(dim[i + 1],
                                       dim[i],
                                       kernel_size=4,
                                       stride=2,
                                       padding=1,
                                       output_padding=output_padding[i]))
                self.bdec.append(nn.BatchNorm2d(dim[i]))
            else:
                self.enc.append(
                    nn.Conv2d(dim[i],
                              dim[i + 1],
                              kernel_size=5,
                              stride=2,
                              padding=2))
                self.benc.append(nn.BatchNorm2d(dim[i + 1]))
                self.dec.append(
                    nn.ConvTranspose2d(dim[i + 1],
                                       dim[i],
                                       kernel_size=5,
                                       stride=2,
                                       padding=2,
                                       output_padding=output_padding[i]))
                self.bdec.append(nn.BatchNorm2d(dim[i]))
            setattr(self, 'enc_{}'.format(i), self.enc[-1])
            setattr(self, 'benc_{}'.format(i), self.benc[-1])
            setattr(self, 'dec_{}'.format(i), self.dec[-1])
            setattr(self, 'bdec_{}'.format(i), self.bdec[-1])
        self.base = []
        self.bbase = []
        for i in range(self.nlayers):
            self.base.append(nn.Sequential(*self.enc[:i]))
            self.bbase.append(nn.Sequential(*self.benc[:i]))

        # initialization
        for m in self.modules():
            if isinstance(m, nn.Linear):
                init.normal(m.weight, std=1e-2)
                if m.bias is not None:
                    init.constant(m.bias, 0)
            elif isinstance(m, nn.Conv2d) or isinstance(m, nn.ConvTranspose2d):
                init.kaiming_normal(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant(m.bias, 0)
Example #46
def weights_init(m):
    classname = m.__class__.__name__
    if 'Linear' in classname:
        init.xavier_normal(m.weight.data)
        init.constant(m.bias, 0.0)
Example #47
 def init_weight(self, m):
     for each_module in m:
         if "Linear" in each_module.__class__.__name__:
             init.xavier_normal(each_module.weight)
             init.constant(each_module.bias, 0.)
Example #48
from retinanet import RetinaNet


print('Loading pretrained ResNet50 model..')
d = torch.load('./model/resnet50.pth')

print('Loading into FPN50..')
fpn = FPN50()
dd = fpn.state_dict()
for k in d.keys():
    if not k.startswith('fc'):  # skip fc layers
        dd[k] = d[k]

print('Saving RetinaNet..')
net = RetinaNet()
for m in net.modules():
    if isinstance(m, nn.Conv2d):
        init.normal(m.weight, mean=0, std=0.01)
        if m.bias is not None:
            init.constant(m.bias, 0)
    elif isinstance(m, nn.BatchNorm2d):
        m.weight.data.fill_(1)
        m.bias.data.zero_()

pi = 0.01
init.constant(net.cls_head[-1].bias, -math.log((1-pi)/pi))

net.fpn.load_state_dict(dd)
torch.save(net.state_dict(), 'net.pth')
print('Done!')
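With pi = 0.01, the final classification bias is set to -log((1 - pi) / pi) ≈ -4.6, so the head's initial sigmoid output equals pi; this is the focal-loss prior from the RetinaNet paper, which keeps the overwhelming number of background anchors from dominating the loss in the first iterations.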
Example #49
def weights_init_classifier(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        init.normal(m.weight.data, std=0.001)
        init.constant(m.bias.data, 0.0)
Example #50
    def __init__(self):
        super(Discriminator, self).__init__()

        self.conv1 = nn.Conv2d(1,32,kernel_size=3,stride=2,padding=1)   # 256x256
        self.conv2 = nn.Conv2d(32, 64, kernel_size=3, stride=2, padding=1)    # 128x128
        self.conv3 = nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1)    # 64x64
        self.conv4 = nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1)    # 32x32
        self.conv5 = nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1)    # 16x16
        self.conv6 = nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1)    # 8x8
        self.conv7 = nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1)    # 4x4
        self.conv8 = nn.Conv2d(512, 1024, kernel_size=4, stride=1, padding=0)    # 1x1
        self.bn1 = nn.BatchNorm2d(32)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(128)
        self.bn4 = nn.BatchNorm2d(256)
        self.bn5 = nn.BatchNorm2d(256)
        self.bn6 = nn.BatchNorm2d(512)
        self.bn7 = nn.BatchNorm2d(512)
        self.sigmoid = nn.Sigmoid()
        self.lrelu = nn.LeakyReLU(negative_slope=0.2)
        init.xavier_normal(self.conv1.weight, gain=np.sqrt(2))
        init.constant(self.conv1.bias, 0.1)
        init.xavier_normal(self.conv2.weight, gain=np.sqrt(2))
        init.constant(self.conv2.bias, 0.1)
        init.xavier_normal(self.conv3.weight, gain=np.sqrt(2))
        init.constant(self.conv3.bias, 0.1)
        init.xavier_normal(self.conv4.weight, gain=np.sqrt(2))
        init.constant(self.conv4.bias, 0.1)
        init.xavier_normal(self.conv5.weight, gain=np.sqrt(2))
        init.constant(self.conv5.bias, 0.1)
        init.xavier_normal(self.conv6.weight, gain=np.sqrt(2))
        init.constant(self.conv6.bias, 0.1)
        init.xavier_normal(self.conv7.weight, gain=np.sqrt(2))
        init.constant(self.conv7.bias, 0.1)
        init.xavier_normal(self.conv8.weight, gain=np.sqrt(2))
        init.constant(self.conv8.bias, 0.1)
Example #51
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Linear') != -1:
        init.orthogonal(m.weight)
        init.constant(m.bias, 0.1)
Example #52
 def _layer_init(self, layer):
     init.xavier_uniform(layer.weight)
     init.constant(layer.bias, 0.1)
Example #53
def init_weights_xavier(model):
    if isinstance(model, nn.Conv2d):
        init.xavier_normal(model.weight)
        init.constant(model.bias, 0)
Example #54
File: init.py Project: gfederix/UNIT
def xavier_weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_uniform(m.weight, gain=np.sqrt(2))
        init.constant(m.bias, 0.1)
Example #55
 def __init_weights(self):
     for m in self.modules():
         if isinstance(m, nn.Conv1d):
             kaiming_normal(m.weight, mode='fan_in')
             if m.bias is not None:
                 constant(m.bias, 0)
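As a quick sanity check for a fan_in Kaiming initialization like the Conv1d case above, the resulting weights should have a standard deviation of roughly sqrt(2 / fan_in); a minimal sketch with an arbitrary shape:

import torch
from torch.nn import init

w = torch.empty(64, 32, 5)              # Conv1d-style weight: (out, in, kernel)
init.kaiming_normal_(w, mode='fan_in')  # fan_in = 32 * 5 = 160
print(w.std())                          # expected ~ sqrt(2 / 160) ≈ 0.11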