def test_lu_weight_decomposition(self):
     # ----------------------------------------------------------------------
     # Prepare some dummy data
     # ----------------------------------------------------------------------
     data = torch.randn(2, 16, 32, 32)
     weight_mat = 2 * torch.eye(16, 16)
     # ----------------------------------------------------------------------
     # Prepare the layer
     # ----------------------------------------------------------------------
     inv_conv = Invertible1x1Conv(16, lu_decomposition=True)
     # initialize the LU factors so the effective weight is W = 2 * I
     init.eye_(inv_conv.permutation)
     init.eye_(inv_conv.lower)
     init.zeros_(inv_conv.upper)
     init.constant_(inv_conv.log_s, 0.6931471805599453)  # ln(2)
     init.constant_(inv_conv.sign_s, 1)
     # ----------------------------------------------------------------------
     # Assess the results are as expected
     # ----------------------------------------------------------------------
     out, log_det = inv_conv(data)
     target_log_det = \
         (torch.logdet(weight_mat) * data.size(2) * data.size(3)).expand(2)
     # --------------
     error_weight = torch.mean(torch.abs(out - 2 * data)).item()
     error_log_det = torch.mean(torch.abs(log_det - target_log_det)).item()
     self.assertLessEqual(error_weight, 1e-12)
     self.assertLessEqual(error_log_det, 1e-12)
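For reference, a minimal sketch of how an LU-parameterized 1x1-conv weight of this kind is typically assembled, following Glow's W = P L (U + diag(sign_s * exp(log_s))); the composition inside Invertible1x1Conv is an assumption here, only the parameter names above are given by the test:

import torch

P = torch.eye(16)                               # permutation, set to I by init.eye_
L = torch.eye(16)                               # lower-triangular factor, set to I
U = torch.zeros(16, 16)                         # upper-triangular factor, set to 0
log_s = torch.full((16,), 0.6931471805599453)   # ln(2)
sign_s = torch.ones(16)

W = P @ L @ (U + torch.diag(sign_s * log_s.exp()))
assert torch.allclose(W, 2 * torch.eye(16))     # effective weight is 2 * I
# the per-position log-determinant is log_s.sum() = 16 * ln(2) = logdet(2 * I),
# which the test scales by the spatial size 32 * 32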
Example #2
    def __init__(self):
        super(Point_Transform_Net, self).__init__()
        self.k = 3

        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(128)
        self.bn3 = nn.BatchNorm1d(1024)

        self.conv1 = nn.Sequential(nn.Conv2d(6, 64, kernel_size=1, bias=False),
                                   self.bn1,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv2 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=1, bias=False),
                                   self.bn2,
                                   nn.LeakyReLU(negative_slope=0.2))
        self.conv3 = nn.Sequential(nn.Conv1d(128, 1024, kernel_size=1, bias=False),
                                   self.bn3,
                                   nn.LeakyReLU(negative_slope=0.2))

        self.linear1 = nn.Linear(1024, 512, bias=False)
        self.bn4 = nn.BatchNorm1d(512)   # renamed: self.bn3 is already used inside conv3
        self.linear2 = nn.Linear(512, 256, bias=False)
        self.bn5 = nn.BatchNorm1d(256)

        self.transform = nn.Linear(256, 3*3)
        init.constant_(self.transform.weight, 0)
        init.eye_(self.transform.bias.view(3, 3))
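The pattern above recurs throughout these examples: zeroing the final layer's weight and writing an identity matrix into its reshaped bias makes the predicted transform start out as the identity for any input. A minimal sketch of the effect in isolation (the 3x3 case; feat is an arbitrary placeholder batch):

import torch
import torch.nn as nn
from torch.nn import init

transform = nn.Linear(256, 3 * 3)
init.constant_(transform.weight, 0)
init.eye_(transform.bias.view(3, 3))      # the view shares storage, so the bias itself is written

feat = torch.randn(4, 256)                # any input
out = transform(feat).view(-1, 3, 3)
assert torch.equal(out[0], torch.eye(3))  # every predicted transform is exactly I at init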
Example #3
    def __init__(self, k):  #, args):
        super(Transform_Net, self).__init__()
        #self.args = args
        self.k = k

        self.bn1 = nn.GroupNorm(32, 64)
        self.bn1_2 = nn.GroupNorm(32, 64)
        self.bn2 = nn.GroupNorm(32, 128)
        self.bn3 = nn.GroupNorm(32, 1024)

        self.conv1 = nn.Sequential(
            nn.Conv2d(self.k * 2, 64, kernel_size=1, bias=False), self.bn1,
            nn.LeakyReLU(negative_slope=0.2))
        self.conv1_2 = nn.Sequential(
            nn.Conv2d(64, 64, kernel_size=1, bias=False), self.bn1_2,
            nn.LeakyReLU(negative_slope=0.2))
        self.conv2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=1, bias=False), self.bn2,
            nn.LeakyReLU(negative_slope=0.2))
        self.conv3 = nn.Sequential(
            nn.Conv1d(128, 1024, kernel_size=1, bias=False), self.bn3,
            nn.LeakyReLU(negative_slope=0.2))

        self.linear1 = nn.Linear(1024, 512, bias=False)
        self.bn4 = nn.GroupNorm(32, 512)   # renamed: self.bn3 is already used inside conv3
        self.linear2 = nn.Linear(512, 256, bias=False)
        self.bn5 = nn.GroupNorm(32, 256)

        self.transform = nn.Linear(256, k * k)  #3*3)
        init.constant_(self.transform.weight, 0)
        init.eye_(self.transform.bias.view(k, k))  #3, 3))
Example #4
    def __init__(self, args=None, normals=False):
        super(Transform_Net, self).__init__()
        self.args = args
        self.k = 3
        if normals:
            self.initialFeatSize = 6
            self.outputSize = 3
        else:
            self.initialFeatSize = 2 * 128
            self.outputSize = 128
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(128)
        self.bn3 = nn.BatchNorm1d(1024)

        self.conv1 = nn.Sequential(
            nn.Conv2d(self.initialFeatSize, 64, kernel_size=1, bias=False),
            self.bn1, nn.LeakyReLU(negative_slope=0.2))
        self.conv2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=1, bias=False), self.bn2,
            nn.LeakyReLU(negative_slope=0.2))
        self.conv3 = nn.Sequential(
            nn.Conv1d(128, 1024, kernel_size=1, bias=False), self.bn3,
            nn.LeakyReLU(negative_slope=0.2))

        self.linear1 = nn.Linear(1024, 512, bias=False)
        self.bn4 = nn.BatchNorm1d(512)   # renamed: self.bn3 is already used inside conv3
        self.linear2 = nn.Linear(512, 256, bias=False)
        self.bn5 = nn.BatchNorm1d(256)

        self.transform = nn.Linear(256, self.outputSize * self.outputSize)
        init.constant_(self.transform.weight, 0)
        init.eye_(self.transform.bias.view(self.outputSize, self.outputSize))
Example #5
 def init_func(m):
     classname = m.__class__.__name__
     if classname.find('BatchNorm2d') != -1:
         if hasattr(m, 'weight') and m.weight is not None:
             init.normal_(m.weight.data, 1.0, gain)
         if hasattr(m, 'bias') and m.bias is not None:
             init.constant_(m.bias.data, 0.0)
     elif hasattr(m, 'weight') and \
         (classname.find('Conv') != -1 or \
             classname.find('Linear') != -1):
         if init_type == 'normal':
             init.normal_(m.weight.data, 0.0, gain)
         elif init_type == 'xavier':
             init.xavier_normal_(m.weight.data, gain=gain)
         elif init_type == 'xavier_uniform':
             init.xavier_uniform_(m.weight.data, gain=1.0)
         elif init_type == 'kaiming':
             init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
         elif init_type == 'orthogonal':
             init.orthogonal_(m.weight.data, gain=gain)
         elif init_type == 'identity':
             init.eye_(m.weight.data)
         elif init_type == 'none':  # uses pytorch's default init method
             m.reset_parameters()
         else:
             raise NotImplementedError('[%s] is not implemented' %
                                       init_type)
         if hasattr(m, 'bias') and m.bias is not None:
             init.constant_(m.bias.data, 0.0)
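Closures like init_func above are designed to be handed to nn.Module.apply, which calls them on every submodule. A minimal sketch of the usual wiring (the wrapper name init_weights and its defaults are assumptions; the dispatch is abbreviated to three of the branches above):

import torch.nn as nn
from torch.nn import init

def init_weights(net, init_type='normal', gain=0.02):
    def init_func(m):
        classname = m.__class__.__name__
        if hasattr(m, 'weight') and m.weight is not None and \
                (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'identity':
                init.eye_(m.weight.data)  # only valid for 2-D weights, i.e. Linear layers
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
    net.apply(init_func)  # apply() recurses through every submodule

net = nn.Sequential(nn.Conv2d(3, 8, kernel_size=3), nn.Linear(8, 8))
init_weights(net, init_type='xavier')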
Example #6
 def reset_parameters(self):
     if self.temp_warp is not None:
         init._no_grad_fill_(self.temp, 5.0)
         init._no_grad_fill_(self.weight, -1.0)
         with torch.no_grad():
             self.weight.fill_diagonal_(1.0)
     else:
         init.eye_(self.weight)
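A quick illustration of what the temp_warp branch above produces (a square weight is assumed):

import torch

w = torch.full((4, 4), -1.0)
w.fill_diagonal_(1.0)
# w now holds 1 on the diagonal and -1 everywhere else,
# matching the fill of -1.0 followed by fill_diagonal_(1.0)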
Example #7
 def __init__(self, cf, confidence=None):
     super(Baseline, self).__init__()
     self.cf = cf
     self.classifier = nn.Linear(cf + 1, cf + 1)
     self.dummy = counting.Counter(cf,
                                   already_sigmoided=True,
                                   confidence=confidence)
     init.eye_(self.classifier.weight)
Example #8
def weights_init(m):
    classname = m.__class__.__name__
    if isinstance(m, nn.Linear):
        init.xavier_normal_(m.weight, gain=np.sqrt(2.0))
    elif classname.find('Conv') != -1:
        init.xavier_normal_(m.weight, gain=np.sqrt(2.0))
    elif classname.find('Linear') != -1:
        # unreachable: nn.Linear modules are already caught by the isinstance branch above
        init.eye_(m.weight)
    elif classname.find('Emb') != -1:
        init.normal_(m.weight, mean=0, std=0.01)
Example #9
def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        init.xavier_uniform_(m.weight.data)
    elif classname.find('Gdn2d') != -1:
        init.eye_(m.gamma.data)
        init.constant_(m.beta.data, 1e-4)
    elif classname.find('Gdn1d') != -1:
        init.eye_(m.gamma.data)
        init.constant_(m.beta.data, 1e-4)
Example #10
def ME_weights_init_connections_identity(m):
    classname = m.__class__.__name__
    if isinstance(m, ME.MinkowskiConvolution):
        m.kernel.data.fill_(1)  # for a 1x1 convolution this is equivalent to identity
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif isinstance(m, ME.MinkowskiLinear):
        eye_(m.linear.weight.data)
        if m.linear.bias is not None:
            m.linear.bias.data.fill_(0)
    elif isinstance(m, ME.MinkowskiBatchNorm):
        m.bn.reset_parameters()
Example #11
def weights_init_connections_identity(m):
    """
    For HOLMES: initialize identity connections
    """
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:
        m.weight.data.fill_(1)  # for a 1x1 convolution this is equivalent to identity
        if m.bias is not None:
            m.bias.data.fill_(0)
    elif classname.find('Linear') != -1:
        eye_(m.weight.data)
        if m.bias is not None:
            m.bias.data.fill_(0)
Example #12
    def __init__(self):
        super(FeatureTransformNet, self).__init__()

        self.conv_block_1 = conv_bn_block(64, 64, 1)
        self.conv_block_2 = conv_bn_block(64, 128, 1)
        self.conv_block_3 = conv_bn_block(128, 1024, 1)

        self.fc_block_4 = fc_bn_block(1024, 512)
        self.fc_block_5 = fc_bn_block(512, 256)

        self.transform = nn.Linear(256, 64*64)
        #  initialize the transform matrix:
        #  the transform layer's weight is [256, 64*64] and its bias has 64*64 entries;
        #  the weight is zeroed and the bias is set to the flattened 64x64 identity
        init.constant_(self.transform.weight, 0)
        init.eye_(self.transform.bias.view(64, 64))
Example #13
def init_iDelta(tensor, delta=0.01):
    r'''Initialize the tensor with I + \delta * random.'''
    r = torch.empty(tensor.shape)
    i = init.eye_(tensor)  # eye_ writes the identity in place and returns the tensor
    # fan_in, fan_out = init._calculate_fan_in_and_fan_out(tensor)
    # a = math.sqrt(6 / (fan_in + fan_out))  # this scaling factor doesn't seem to make much difference; same speed.
    # return torch.mul(a, i) + torch.mul(delta, init.xavier_uniform_(r))
    return i + torch.mul(delta, init.uniform_(r, 0, 1.0))
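A possible way to use this helper on an existing layer (the layer itself is illustrative):

import torch
import torch.nn as nn

layer = nn.Linear(8, 8)
with torch.no_grad():
    layer.weight.copy_(init_iDelta(torch.empty(8, 8), delta=0.01))
# the weight is now near-identity: I plus small uniform noise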
Example #14
    def __init__(self):
        super(InputTransformNet, self).__init__()
        self.relu = nn.ReLU()
        self.conv1 = nn.Conv1d(3, 64, 1)
        self.bn1 = nn.BatchNorm1d(64)
        self.conv2 = nn.Conv1d(64, 128, 1)
        self.bn2 = nn.BatchNorm1d(128)
        self.conv3 = nn.Conv1d(128, 1024, 1)
        self.bn3 = nn.BatchNorm1d(1024)
        self.fc1 = nn.Linear(1024, 512)
        self.bn4 = nn.BatchNorm1d(512)
        self.fc2 = nn.Linear(512, 256)
        self.bn5 = nn.BatchNorm1d(256)

        self.transform = nn.Linear(256, 9)
        init.constant_(self.transform.weight, 0)
        init.eye_(self.transform.bias.view(3, 3))
Example #15
    def reset_parameters(self):
        """
        Initialize parameters following the way proposed in the paper.
        """

        # The input-to-hidden weight matrix is initialized orthogonally.
        init.orthogonal_(self.weight_ih.data)
        # The hidden-to-hidden weight matrix is initialized as an identity
        # matrix.
        eye_(self.weight_hh)
        # The bias is just set to zero vectors.
        init.constant_(self.bias.data, val=0)
        # Initialization of BN parameters.
        self.bn_ih.reset_parameters()
        self.bn_hh.reset_parameters()
        self.bn_c.reset_parameters()
        self.bn_ih.bias.data.fill_(0)
        self.bn_hh.bias.data.fill_(0)
        self.bn_ih.weight.data.fill_(0.1)
        self.bn_hh.weight.data.fill_(0.1)
        self.bn_c.weight.data.fill_(0.1)
Example #16
    def reset_parameters(self):
        eye_(self.conv0.weight)
        self.conv0.weight.requires_grad = False
        eye_(self.conv0.lin.weight)

        self.conv1.reset_parameters()
        self.conv2.reset_parameters()
        self.conv3.reset_parameters()

        self.norm1.reset_parameters()
        self.norm2.reset_parameters()
        self.norm3.reset_parameters()

        self.lin_GNN.reset_parameters()

        self.out_conv1.reset_parameters()
        self.out_conv2.reset_parameters()
        self.out_conv3.reset_parameters()

        self.out_norm1.reset_parameters()
        self.out_norm2.reset_parameters()
        self.out_norm3.reset_parameters()

        self.lin_out.reset_parameters()
Example #17
 def init_weights(self):
     init.eye_(self.fc1.weight)
Example #18
 def reset_parameters(self):
     init.orthogonal_(self.weight_ih.data)
     init.eye_(self.weight_hh)
     init.constant_(self.bias.data, val=0)
     init.eye_(self.trans)
Example #19
# normal
t2 = torch.Tensor(3, 1, 2)
init.normal_(t2, mean=0, std=1)

# constant
t3 = torch.tensor([3, 1], dtype=torch.int64)
init.constant_(t3, 2)
t4 = torch.Tensor(3, 1, 2)  # works for any number of dims
init.constant_(t4, 2)
t5 = torch.tensor(np.empty([3, 1], dtype=np.float64), dtype=torch.int64)
init.constant_(t5, 2)

# eye - 2 dim
t6 = torch.Tensor(3, 3)
# t6 = torch.Tensor(3, 3, 3) # error
init.eye_(t6)

# ones/zeros
t7 = torch.empty(3, 1)
# print(t7.dtype)
init.ones_(t7)
print(t7)

# dirac - for 3-5 dim (conv) tensors; preserves channel identity
t8 = torch.empty(8, 8, 3, 3)
init.dirac_(t8)

# xavier_normal
t9 = torch.empty(3, 5)
init.xavier_normal_(t9)

# kaiming_uniform
t10 = torch.empty(3, 5)
init.kaiming_uniform_(t10, nonlinearity='relu')

# kaiming_normal
t11 = torch.empty(3, 5)
init.kaiming_normal_(t11, nonlinearity='relu')
Example #20
 def __init__(self, cf):
     super(Baseline, self).__init__()
     self.cf = cf
     self.classifier = nn.Linear(cf + 1, cf + 1)
     self.dummy = counting.Counter(cf)
     init.eye_(self.classifier.weight)
Example #21
 def __init__(self, cf):
     super(Net, self).__init__()
     self.cf = cf
     self.counter = counting.Counter(cf)
     self.classifier = nn.Linear(cf + 1, cf + 1)
     init.eye_(self.classifier.weight)
Example #22
 def __init__(self, cf):
     super(Net, self).__init__()
     self.cf = cf
     self.ctr = VQACntrModule(cf, already_sigmoided=True)
     self.cfr = nn.Linear(cf + 1, cf + 1)
     init.eye_(self.cfr.weight)
Example #23
 def init_parameter(self, parameter):
     init.eye_(parameter)
Example #24
 def reset_parameters(self):
     init.eye_(self.g)