Example #1
import mindspore.nn as nn
import mindspore.common.initializer as init


def init_weights(net, init_type='normal', init_gain=0.02):
    """
    Initialize network weights.
    Parameters:
        net (Cell): Network to be initialized
        init_type (str): The name of an initialization method: normal | xavier | constant.
        init_gain (float): Gain factor for normal and xavier.
    """
    for _, cell in net.cells_and_names():
        if isinstance(cell, (nn.Conv2d, nn.Conv2dTranspose)):
            if init_type == 'normal':
                cell.weight.set_data(
                    init.initializer(init.Normal(init_gain),
                                     cell.weight.shape))
            elif init_type == 'xavier':
                cell.weight.set_data(
                    init.initializer(init.XavierUniform(init_gain),
                                     cell.weight.shape))
            elif init_type == 'constant':
                cell.weight.set_data(init.initializer(0.001,
                                                      cell.weight.shape))
            else:
                raise NotImplementedError(
                    'initialization method [%s] is not implemented' %
                    init_type)
        elif isinstance(cell, nn.BatchNorm2d):
            cell.gamma.set_data(init.initializer('ones', cell.gamma.shape))
            cell.beta.set_data(init.initializer('zeros', cell.beta.shape))
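
A minimal usage sketch for init_weights (the layer configuration below is
illustrative, not from the source):

net = nn.SequentialCell(nn.Conv2d(3, 8, kernel_size=3), nn.BatchNorm2d(8))
init_weights(net, init_type='xavier', init_gain=0.02)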
Example #2
    def _initialize_weights(self):
        """Re-initialize the weights of all quantization-aware cells."""
        # Assumes module-level imports: numpy as np, mindspore.nn as nn,
        # and mindspore.common.initializer as weight_init.
        self.init_parameters_data()
        np.random.seed(1)  # seed once; the original reseeded on every loop pass
        for _, m in self.cells_and_names():
            # The three quant cell types were handled by identical branches;
            # one isinstance check keeps the behavior without the repetition.
            if isinstance(m, (nn.Conv2dBnFoldQuant, nn.DenseQuant,
                              nn.Conv2dBnWithoutFoldQuant)):
                m.weight.set_data(
                    weight_init.initializer(weight_init.Normal(),
                                            m.weight.shape, m.weight.dtype))
Example #3
    def __init__(self):
        super(ParameterNet, self).__init__()
        # parameter_shape is defined elsewhere in the source test file.
        self.para_xavier_uniform = Parameter(init.initializer('xavier_uniform', parameter_shape), name="xavier_uniform")
        self.para_he_uniform = Parameter(init.initializer('he_uniform', parameter_shape), name="he_uniform")
        self.para_xavier_uniform2 = Parameter(init.initializer(init.XavierUniform(), parameter_shape), name="xavier_uniform2")
        self.para_he_uniform2 = Parameter(init.initializer(init.HeUniform(), parameter_shape), name="he_uniform2")
        self.para_truncated_normal = Parameter(init.initializer(init.TruncatedNormal(), parameter_shape), name="truncated_normal")
        self.para_normal = Parameter(init.initializer(init.Normal(), parameter_shape), name="normal")
        self.para_uniform = Parameter(init.initializer(init.Uniform(), parameter_shape), name="uniform")
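
A small equivalence sketch (the shape is hypothetical): string aliases such
as 'xavier_uniform' resolve to the same initializer classes, so both
spellings produce tensors of the same shape and distribution.

import mindspore as ms
import mindspore.common.initializer as init

parameter_shape = (2, 3)  # hypothetical shape for illustration
a = init.initializer('xavier_uniform', parameter_shape, ms.float32)
b = init.initializer(init.XavierUniform(), parameter_shape, ms.float32)
assert a.shape == b.shape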
Example #4
    def custom_init_weight(self):
        """
        Initialize the weights of the Conv2d and Dense cells in the network.
        """
        # KaimingNormal is a helper class defined in the source repository;
        # math, nn and init are imported at module level there.
        for _, cell in self.cells_and_names():
            if isinstance(cell, nn.Conv2d):
                cell.weight.set_data(init.initializer(
                    KaimingNormal(a=math.sqrt(5), mode='fan_out', nonlinearity='relu'),
                    cell.weight.shape, cell.weight.dtype))
                if cell.bias is not None:
                    cell.bias.set_data(init.initializer(
                        'zeros', cell.bias.shape, cell.bias.dtype))
            elif isinstance(cell, nn.Dense):
                cell.weight.set_data(init.initializer(
                    init.Normal(0.01), cell.weight.shape, cell.weight.dtype))
                if cell.bias is not None:
                    cell.bias.set_data(init.initializer(
                        'zeros', cell.bias.shape, cell.bias.dtype))
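
If the repository's KaimingNormal helper is unavailable, MindSpore's built-in
init.HeNormal is the closest stock equivalent (a sketch, assuming the same
fan-out/relu configuration):

import math
import mindspore.common.initializer as init

# Stand-in for the custom helper; negative_slope plays the role of `a`.
kaiming = init.HeNormal(negative_slope=math.sqrt(5), mode='fan_out',
                        nonlinearity='relu')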
Example #5
    def __init__(self,
                 low_dim,
                 class_num=200,
                 drop=0.2,
                 part=0,
                 alpha=0.2,
                 nheads=4,
                 arch="resnet50"):
        super(embed_net, self).__init__()
        # print("class_num is :", class_num)
        self.thermal_module = thermal_module(arch=arch)
        self.visible_module = visible_module(arch=arch)
        self.base_resnet = base_resnet(arch=arch)
        pool_dim = 2048
        self.dropout = drop
        self.part = part

        self.l2norm = Normalize(2)
        self.bottleneck = nn.BatchNorm1d(num_features=pool_dim)
        # Note: this sets an attribute on the Cell, not on a Parameter, so it
        # freezes nothing. The PyTorch original freezes only the bias:
        # bottleneck.bias.requires_grad_(False).
        self.bottleneck.requires_grad = False

        self.classifier = nn.Dense(pool_dim, class_num, has_bias=False)
        # self.classifier1 = nn.Dense(pool_dim, class_num, has_bias=False)
        # self.classifier2 = nn.Dense(pool_dim, class_num, has_bias=False)

        # TODO: add weights initialization module
        # self.bottleneck.apply(weights_init_kaiming)
        # self.classifier.apply(weights_init_classifier)
        # self.classifier1.apply(weights_init_classifier)
        # self.classifier2.apply(weights_init_classifier)


        self.classifier.weight.set_data(
            weight_init.initializer(weight_init.Normal(sigma=0.001),
                                    self.classifier.weight.shape,
                                    self.classifier.weight.dtype))

        self.avgpool = P.ReduceMean(keep_dims=True)
        if self.part > 0:
            self.wpa = IWPA(pool_dim, self.part)
        else:
            self.wpa = IWPA(pool_dim, 3)
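
A sketch of a closer fit to the PyTorch intent noted above (assuming
MindSpore's BatchNorm1d, whose affine parameters are named gamma and beta):

        # Inside __init__, freeze only the BatchNorm bias (beta), mirroring
        # the PyTorch original's bottleneck.bias.requires_grad_(False):
        self.bottleneck.beta.requires_grad = False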
Example #6
    def __init__(self, num_classes=1000, version='1.0'):
        super(SqueezeNet, self).__init__()
        self.num_classes = num_classes
        self.version = version

        if self.version not in ['1.0', '1.1']:
            raise ValueError(
                "Unsupported SqueezeNet version {version}: "
                "1.0 or 1.1 expected".format(version=self.version))
        # extract features
        self.features = nn.SequentialCell(
            Conv2dBlock(3, 96, kernel_size=7, stride=2, pad_mode="valid"),
            nn.MaxPool2d(kernel_size=3, stride=2,
                         pad_mode="valid"),  # without ceil_mode option
            FireBlock(96, 16, 64, 64),
            FireBlock(128, 16, 64, 64),
            FireBlock(128, 32, 128, 128),
            nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid"),
            # (32, 256, 26, 26)
            FireBlock(256, 32, 128, 128),
            FireBlock(256, 48, 192, 192),
            FireBlock(384, 48, 192, 192),
            FireBlock(384, 64, 256, 256),
            nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="valid"),
            FireBlock(512, 64, 256, 256)
            # (32, 512, 12, 12)
        )
        # classifier
        self.classifier = nn.SequentialCell(
            nn.Dropout(keep_prob=0.5),
            Conv2dBlock(512,
                        self.num_classes,
                        kernel_size=1,
                        weight_init=init.Normal(sigma=0.01)),
            # Conv2dBlock(512, self.num_classes, kernel_size=1)
        )
        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
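
A construction sketch for the class above (FireBlock and Conv2dBlock are
helpers from the source repository; the input size is illustrative):

import numpy as np
import mindspore as ms

net = SqueezeNet(num_classes=10, version='1.0')
x = ms.Tensor(np.zeros((32, 3, 224, 224)), ms.float32)
features = net.features(x)  # (32, 512, 12, 12), matching the shape comments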
Example #7
def test_init_normal():
    tensor = init.initializer(init.Normal(), [5, 4], ms.float32)
    assert isinstance(tensor, ms.Tensor), 'tensor init failed!'
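
A hypothetical companion check (not from the source): materializing the lazy
tensor with init_data() allows a sanity check on the values, since MindSpore's
Normal defaults to sigma=0.01.

def test_init_normal_values():
    tensor = init.initializer(init.Normal(), [1000], ms.float32)
    values = tensor.init_data().asnumpy()  # materialize the lazy initializer
    assert abs(values.mean()) < 0.01, 'normal init mean drifted!'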