Example #1
    def progress(self):
        self.current_scale += 1

        if self.current_scale % 4 == 0:
            self.nf *= 2

        tmp_generator = nn.CellList()
        tmp_generator.append(
            nn.SequentialCell(nn.Conv2d(3, self.nf, 3, 1),
                              nn.BatchNorm2d(self.nf), nn.LeakyReLU(2e-1)))

        for _ in range(3):
            tmp_generator.append(
                nn.SequentialCell(nn.Conv2d(self.nf, self.nf, 3, 1),
                                  nn.BatchNorm2d(self.nf), nn.LeakyReLU(2e-1)))

        tmp_generator.append(
            nn.SequentialCell(nn.Conv2d(self.nf, 3, 3, 1), nn.Tanh()))

        tmp_generator = nn.SequentialCell(*tmp_generator)

        if self.current_scale % 4 != 0:
            prev_generator = self.sub_generators[-1]

            # Initialize layers via copy
            if self.current_scale >= 1:
                mindspore.load_param_into_net(
                    tmp_generator,
                    prev_generator.parameters_dict())  # load the stored parameters as a Python dict

        self.sub_generators.append(tmp_generator)
        print("GENERATOR PROGRESSION DONE")
Example #2
 def __init__(self, seq_len):
     super(ModelOneHop, self).__init__()
     self.expanddims = P.ExpandDims()
     self.expanddims_axis_0 = 1
     self.expanddims_axis_1 = 2
     self.cast = P.Cast()
     self.cast_to = mstype.float32
     self.sub = P.Sub()
     self.sub_bias = 1.0
     self.mul = P.Mul()
     self.mul_w = -10000.0
     self.input_weight_0 = Parameter(Tensor(
         np.random.uniform(0, 1, (30522, 768)).astype(np.float32)),
                                     name=None)
     self.gather_axis_0 = 0
     self.gather = P.Gather()
     self.input_weight_1 = Parameter(Tensor(
         np.random.uniform(0, 1, (2, 768)).astype(np.float32)),
                                     name=None)
     self.add = P.Add()
     self.add_bias = Parameter(Tensor(
         np.random.uniform(0, 1, (1, seq_len, 768)).astype(np.float32)),
                               name=None)
     self.layernorm = LayerNorm()
     self.encoder_layer_1_4 = BertEncoder(seq_len)
     self.encoder_layer_5_8 = BertEncoder(seq_len)
     self.encoder_layer_9_12 = BertEncoder(seq_len)
     self.cls_ids = Tensor(np.array(0))
     self.gather_axis_1 = 1
     self.dense = nn.Dense(in_channels=768, out_channels=768, has_bias=True)
     self.tanh = nn.Tanh()
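The sub/mul pair with sub_bias = 1.0 and mul_w = -10000.0 is the standard BERT-style extended attention mask: positions where the mask is 0 receive a large negative additive bias before softmax. A NumPy sketch of the arithmetic (the mask values are illustrative):

import numpy as np

mask = np.array([[1, 1, 0, 0]], dtype=np.float32)
extended = (1.0 - mask) * -10000.0
print(extended)  # [[-0. -0. -10000. -10000.]]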
Example #3
    def __init__(self, img_size_min, num_scale, scale_factor=4 / 3):
        super(Generator, self).__init__()
        self.img_size_min = img_size_min
        self.scale_factor = scale_factor
        self.num_scale = num_scale
        self.nf = 32
        self.current_scale = 0

        self.size_list = [
            int(self.img_size_min * scale_factor**i)
            for i in range(num_scale + 1)
        ]
        print(self.size_list)

        self.sub_generators = nn.CellList()

        first_generator = nn.CellList()

        first_generator.append(
            nn.SequentialCell(nn.Conv2d(3, self.nf, 3, 1),
                              nn.BatchNorm2d(self.nf), nn.LeakyReLU(2e-1)))
        for _ in range(3):
            first_generator.append(
                nn.SequentialCell(nn.Conv2d(self.nf, self.nf, 3, 1),
                                  nn.BatchNorm2d(self.nf), nn.LeakyReLU(2e-1)))

        first_generator.append(
            nn.SequentialCell(nn.Conv2d(self.nf, 3, 3, 1), nn.Tanh()))

        first_generator = nn.SequentialCell(*first_generator)

        self.sub_generators.append(first_generator)
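The size_list comprehension builds a geometric pyramid of image sizes, one per scale. For instance, with img_size_min=25, num_scale=8 and the default scale_factor=4/3 (illustrative values):

sizes = [int(25 * (4 / 3) ** i) for i in range(8 + 1)]
print(sizes)  # [25, 33, 44, 59, 79, 105, 140, 187, 249]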
Example #4
 def __init__(self, config: Callable[..., None]) -> None:
     super().__init__()
     self.dense = nn.Dense(
         config.hidden_size,
         config.hidden_size,
         weight_init=TruncatedNormal(config.initializer_range),
     )
     self.activation = nn.Tanh()
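Only __init__ is shown; a plausible construct for this pooler-style cell (an assumption, following the common BERT pooler pattern) projects the first token's hidden state and squashes it with tanh:

 def construct(self, hidden_states):
     # Assumed wiring: pool by taking the first token, then Dense -> Tanh.
     first_token = hidden_states[:, 0]
     return self.activation(self.dense(first_token))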
Example #5
 def __init__(self):
     """init function"""
     super(NewGeLU, self).__init__()
     self.mul = P.Mul()
     self.pow = P.Pow()
     self.add = P.Add()
     self.tanh = nn.Tanh()
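These ops are enough to compose the tanh approximation of GELU, gelu(x) ≈ 0.5·x·(1 + tanh(√(2/π)·(x + 0.044715·x³))). A NumPy reference check of that formula (not part of the original cell):

import numpy as np

x = np.linspace(-3, 3, 7).astype(np.float32)
gelu = 0.5 * x * (1 + np.tanh(np.sqrt(2 / np.pi) * (x + 0.044715 * x ** 3)))
print(gelu)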
Example #6
 def __init__(self, dim=258, dr=0.5):
     super(Mdnn, self).__init__()
     self.dim = dim
     self.dr = dr  # dropout_ratio
     self.fc1 = nn.Dense(dim, 512)
     self.fc2 = nn.Dense(512, 512)
     self.fc3 = nn.Dense(512, 512)
     self.fc4 = nn.Dense(512, 129)
     self.tanh = nn.Tanh()
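Note that dr is stored but no dropout cell is created in this snippet. A plausible forward pass for the MLP (an assumption; only __init__ is shown) applies tanh between the hidden layers:

 def construct(self, x):
     # Assumed wiring: three tanh-activated hidden layers, linear output.
     x = self.tanh(self.fc1(x))
     x = self.tanh(self.fc2(x))
     x = self.tanh(self.fc3(x))
     return self.fc4(x)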
Example #7
    def __init__(self,
                 is_training,
                 query_size,
                 key_size,
                 num_units,
                 normalize=False,
                 initializer_range=0.1,
                 compute_type=mstype.float16):
        super(BahdanauAttention, self).__init__()
        self.is_training = is_training
        self.mask = None
        self.query_size = query_size
        self.key_size = key_size
        self.normalize = normalize
        self.num_units = num_units
        self.linear_att = Parameter(Tensor(np.random.uniform(
            -initializer_range, initializer_range, size=[num_units]),
                                           dtype=mstype.float32),
                                    name='linear_att')
        if self.normalize:
            self.normalize_scalar = Parameter(Tensor(np.array(
                [1.0 / num_units]),
                                                     dtype=mstype.float32),
                                              name='normalize_scalar')
            self.normalize_bias = Parameter(Tensor(np.zeros(num_units),
                                                   dtype=mstype.float32),
                                            name='normalize_bias')
        self.transpose = P.Transpose()
        self.transpose_orders = (1, 0, 2)
        self.shape_op = P.Shape()

        self.linear_q = nn.Dense(
            query_size,
            num_units,
            has_bias=False,
            weight_init=Uniform(initializer_range)).to_float(compute_type)

        self.linear_k = nn.Dense(
            key_size,
            num_units,
            has_bias=False,
            weight_init=Uniform(initializer_range)).to_float(compute_type)
        self.expand = P.ExpandDims()
        self.tile = P.Tile()

        self.norm = nn.Norm(axis=-1)
        self.mul = P.Mul()
        self.matmul = P.MatMul()
        self.batchMatmul = P.BatchMatMul()
        self.tanh = nn.Tanh()

        self.matmul_trans_b = P.BatchMatMul(transpose_b=True)
        self.softmax = nn.Softmax(axis=-1)
        self.reshape = P.Reshape()
        self.cast = P.Cast()
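These pieces implement additive (Bahdanau) attention: queries and keys are projected to num_units, summed, passed through tanh, and reduced against the linear_att vector. A NumPy sketch of the score computation with illustrative shapes:

import numpy as np

b, t_q, t_k, units = 2, 3, 5, 4
q = np.random.randn(b, t_q, 1, units)   # projected queries
k = np.random.randn(b, 1, t_k, units)   # projected keys
v = np.random.randn(units)              # plays the role of linear_att
scores = np.tanh(q + k) @ v             # (b, t_q, t_k)
weights = np.exp(scores) / np.exp(scores).sum(-1, keepdims=True)  # softmax over keys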
Example #8
    def test_sequentialcell_append(self):
        input_np = np.ones((1, 3)).astype(np.float32)
        input_me = Tensor(input_np)
        relu = nn.ReLU()
        tanh = nn.Tanh()
        seq = nn.SequentialCell([relu])
        seq.append(tanh)
        out_me = seq(input_me)

        seq1 = nn.SequentialCell([relu, tanh])
        out = seq1(input_me)

        assert out[0][0] == out_me[0][0]
Example #9
 def __init__(self):
     """init function"""
     super(NewGeLU, self).__init__()
     self.mul_0 = P.Mul()
     self.mul_0_w = 0.5
     self.pow_1 = P.Pow()
     self.pow_1_input_weight = 3.0
     self.mul_2 = P.Mul()
     self.mul_2_w = 0.044714998453855515
     self.add_3 = P.Add()
     self.mul_4 = P.Mul()
     self.mul_4_w = 0.7978845834732056
     self.tanh_5 = nn.Tanh()
     self.add_6 = P.Add()
     self.add_6_bias = 1.0
     self.mul_7 = P.Mul()
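The numbered ops and constants spell out gelu(x) ≈ 0.5·x·(1 + tanh(0.79788·(x + 0.044715·x³))), where 0.79788 ≈ √(2/π). A plausible construct wiring them together (an assumption; only __init__ is shown):

 def construct(self, x):
     # inner = x + 0.044715 * x**3
     inner = self.add_3(x, self.mul_2(self.pow_1(x, self.pow_1_input_weight), self.mul_2_w))
     # tanh(sqrt(2/pi) * inner)
     tanh_out = self.tanh_5(self.mul_4(inner, self.mul_4_w))
     # 0.5 * x * (tanh_out + 1)
     return self.mul_7(self.mul_0(x, self.mul_0_w), self.add_6(tanh_out, self.add_6_bias))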
Example #10
    def __init__(self, filters, n_filters, max_chars_per_token, char_embed_dim,
                 n_chars, n_highway, output_dim, activation):
        super().__init__()

        self.max_chars_per_token = max_chars_per_token

        # activation for convolutions
        if activation == 'tanh':
            self._activation = nn.Tanh()
        elif activation == 'relu':
            self._activation = nn.ReLU()
        else:
            raise ValueError("Unknown activation")

        # init char_embedding
        self.char_embedding = Embedding(n_chars + 1,
                                        char_embed_dim,
                                        embedding_table=Uniform(1.0),
                                        padding_idx=0)
        # run convolutions
        convolutions = []
        for (width, num) in filters:
            if activation == 'tanh':
                cnn_weight_init = Normal(np.sqrt(1.0 / width * char_embed_dim))
            elif activation == 'relu':
                cnn_weight_init = Uniform(0.05)
            conv = nn.Conv1d(in_channels=char_embed_dim,
                             out_channels=num,
                             kernel_size=width,
                             has_bias=True,
                             weight_init=cnn_weight_init,
                             pad_mode='valid')
            convolutions.append(conv)
        self._convolutions = nn.CellList(convolutions)

        # highway layers
        self._highways = HighWay(n_filters, n_highway, 'relu')
        # projection layer
        self._projection = nn.Dense(n_filters,
                                    output_dim,
                                    has_bias=True,
                                    weight_init=Normal(np.sqrt(1.0 /
                                                               n_filters)))
        # array operations
        self.transpose = P.Transpose()
        self.concat = P.Concat(-1)
        self.max = P.ReduceMax()
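Only the layers are shown; the usual forward pass for this character-CNN encoder runs each convolution over the embedded characters, max-pools over time with ReduceMax, and concatenates the results. A NumPy sketch of the pooling step with illustrative filter counts:

import numpy as np

feats = [np.random.randn(2, n, 7) for n in (32, 64)]          # (batch, filters, time)
pooled = np.concatenate([f.max(axis=-1) for f in feats], -1)  # (2, 96)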
Example #11
 def __init__(self):
     super(MaskConv, self).__init__()
     self.zeros = P.ZerosLike()
     self.conv1 = nn.Conv2d(1,
                            32,
                            kernel_size=(41, 11),
                            stride=(2, 2),
                            pad_mode='pad',
                            padding=(20, 20, 5, 5))
     self.bn1 = nn.BatchNorm2d(num_features=32)
     self.conv2 = nn.Conv2d(32,
                            32,
                            kernel_size=(21, 11),
                            stride=(2, 1),
                            pad_mode='pad',
                            padding=(10, 10, 5, 5))
     self.bn2 = nn.BatchNorm2d(num_features=32)
     self.tanh = nn.Tanh()
     self._initialize_weights()
     self.module_list = nn.CellList(
         [self.conv1, self.bn1, self.tanh, self.conv2, self.bn2, self.tanh])
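With pad_mode='pad', each convolution's spatial output follows floor((size + 2·pad − kernel) / stride) + 1. A quick check for conv1 on an illustrative 161-bin spectrogram height:

def conv_out(size, kernel, stride, pad):
    return (size + 2 * pad - kernel) // stride + 1

print(conv_out(161, 41, 2, 20))  # 81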
Example #12
 def __init__(self):
     super(ModelTwoHop, self).__init__()
     self.expanddims_0 = P.ExpandDims()
     self.expanddims_0_axis = 1
     self.expanddims_3 = P.ExpandDims()
     self.expanddims_3_axis = 2
     self.cast_5 = P.Cast()
     self.cast_5_to = mstype.float32
     self.sub_7 = P.Sub()
     self.sub_7_bias = 1.0
     self.mul_9 = P.Mul()
     self.mul_9_w = -10000.0
     self.gather_1_input_weight = Parameter(Tensor(
         np.random.uniform(0, 1, (30522, 768)).astype(np.float32)),
                                            name=None)
     self.gather_1_axis = 0
     self.gather_1 = P.Gather()
     self.gather_2_input_weight = Parameter(Tensor(
         np.random.uniform(0, 1, (2, 768)).astype(np.float32)),
                                            name=None)
     self.gather_2_axis = 0
     self.gather_2 = P.Gather()
     self.add_4 = P.Add()
     self.add_6 = P.Add()
     self.add_6_bias = Parameter(Tensor(
         np.random.uniform(0, 1, (1, 448, 768)).astype(np.float32)),
                                 name=None)
     self.layernorm1_0 = LayerNorm()
     self.module50_0 = Encoder1_4()
     self.module50_1 = Encoder1_4()
     self.module50_2 = Encoder1_4()
     self.gather_643_input_weight = Tensor(np.array(0))
     self.gather_643_axis = 1
     self.gather_643 = P.Gather()
     self.dense_644 = nn.Dense(in_channels=768,
                               out_channels=768,
                               has_bias=True)
     self.tanh_645 = nn.Tanh()
Example #13
 def __init__(self):
     super().__init__()
     MetaFactory.__init__(self)
     self.relu = ReLU()
     self.tanh = nn.Tanh()
     self.add = Add()
Example #14
 def __init__(self):
     super().__init__()
     MetaFactory.__init__(self)
     self.relu = ReLU()
     self.tanh = nn.Tanh()
     self.softmax = nn.Softmax()
Example #15
 def __init__(self):
     super(Model, self).__init__()
     self.tanh_0 = nn.Tanh()
     self.dense_1 = nn.Dense(in_channels=768, out_channels=1, has_bias=True)
Example #16
class RNNCell(nn.Cell):
    """
    An Elman RNN cell with tanh or ReLU non-linearity.
    
    Args:
        input_size:  The number of expected features in the input 'x'
        hidden_size: The number of features in the hidden state 'h'
        bias: If 'False', then the layer does not use bias weights b_ih and b_hh. Default: 'True'
        nonlinearity: The non-linearity to use. Can be either 'tanh' or 'relu'. Default: 'tanh'
    
    Inputs:
        input: Tensor, (batch, input_size)
        hidden: Tensor, (batch, hidden_size)
    Outputs:
        h: Tensor, (batch, hidden_size)
    """
    nonlinearity_dict = {'tanh': nn.Tanh(), 'relu': nn.ReLU()}

    def __init__(self,
                 input_size: int,
                 hidden_size: int,
                 bias: bool = True,
                 nonlinearity: str = 'tanh'):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.bias = bias

        stdv = 1 / math.sqrt(hidden_size)
        self.weight_ih = Parameter(
            Tensor(
                np.random.uniform(-stdv, stdv,
                                  (input_size, hidden_size)).astype(
                                      np.float32)))
        self.weight_hh = Parameter(
            Tensor(
                np.random.uniform(-stdv, stdv,
                                  (hidden_size, hidden_size)).astype(
                                      np.float32)))
        if bias:
            self.bias_ih = Parameter(
                Tensor(
                    np.random.uniform(-stdv, stdv,
                                      (hidden_size)).astype(np.float32)))
            self.bias_hh = Parameter(
                Tensor(
                    np.random.uniform(-stdv, stdv,
                                      (hidden_size)).astype(np.float32)))

        self.nonlinearity = self.nonlinearity_dict[nonlinearity]
        self.mm = P.MatMul()

    def construct(self, input: Tensor, hx: Tensor) -> Tensor:
        if self.bias:
            i_gates = self.mm(input, self.weight_ih) + self.bias_ih
            h_gates = self.mm(hx, self.weight_hh) + self.bias_hh
        else:
            i_gates = self.mm(input, self.weight_ih)
            h_gates = self.mm(hx, self.weight_hh)
        h = self.nonlinearity(i_gates + h_gates)
        return h
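A minimal usage sketch for the cell above (batch and feature sizes are illustrative):

import numpy as np
from mindspore import Tensor

cell = RNNCell(input_size=8, hidden_size=16)
x = Tensor(np.random.randn(4, 8).astype(np.float32))  # (batch, input_size)
h0 = Tensor(np.zeros((4, 16), dtype=np.float32))      # (batch, hidden_size)
h1 = cell(x, h0)                                      # (4, 16), in (-1, 1) for tanh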
Example #17
    def __init__(self,
                 outer_nc,
                 inner_nc,
                 in_planes=None,
                 dropout=False,
                 submodule=None,
                 outermost=False,
                 innermost=False,
                 alpha=0.2,
                 norm_mode='batch'):
        super(UnetSkipConnectionBlock, self).__init__()
        downnorm = nn.BatchNorm2d(inner_nc)
        upnorm = nn.BatchNorm2d(outer_nc)
        use_bias = False
        if norm_mode == 'instance':
            downnorm = nn.BatchNorm2d(inner_nc, affine=False)
            upnorm = nn.BatchNorm2d(outer_nc, affine=False)
            use_bias = True
        if in_planes is None:
            in_planes = outer_nc
        downconv = nn.Conv2d(in_planes,
                             inner_nc,
                             kernel_size=4,
                             stride=2,
                             padding=1,
                             has_bias=use_bias,
                             pad_mode='pad')
        downrelu = nn.LeakyReLU(alpha)
        uprelu = nn.ReLU()

        if outermost:
            upconv = nn.Conv2dTranspose(inner_nc * 2,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1,
                                        pad_mode='pad')
            down = [downconv]
            up = [uprelu, upconv, nn.Tanh()]
            model = down + [submodule] + up
        elif innermost:
            upconv = nn.Conv2dTranspose(inner_nc,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1,
                                        has_bias=use_bias,
                                        pad_mode='pad')
            down = [downrelu, downconv]
            up = [uprelu, upconv, upnorm]
            model = down + up
        else:
            upconv = nn.Conv2dTranspose(inner_nc * 2,
                                        outer_nc,
                                        kernel_size=4,
                                        stride=2,
                                        padding=1,
                                        has_bias=use_bias,
                                        pad_mode='pad')
            down = [downrelu, downconv, downnorm]
            up = [uprelu, upconv, upnorm]

            model = down + [submodule] + up
            if dropout:
                model.append(nn.Dropout(0.5))

        self.model = nn.SequentialCell(model)
        self.skip_connections = not outermost
        self.concat = ops.Concat(axis=1)
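Blocks nest from the innermost outward, and only the outermost branch ends in nn.Tanh(), mapping the generated image into (-1, 1). A sketch of composing three blocks (channel counts are illustrative assumptions):

inner = UnetSkipConnectionBlock(64, 128, innermost=True)
mid = UnetSkipConnectionBlock(32, 64, submodule=inner)
net = UnetSkipConnectionBlock(3, 32, submodule=mid, outermost=True)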
Example #18
    def __init__(self):
        super(MDNet, self).__init__()
        self.reshape = P.Reshape()
        self.shape = P.Shape()
        self.concat0 = P.Concat(axis=0)
        self.tanh = nn.Tanh()
        self.mat = P.MatMul()
        self.batchmat = nn.MatMul()
        self.batchmat_tran = nn.MatMul(transpose_x1=True)
        self.idt1 = Parameter(Tensor(np.random.normal(0.1, 0.001, (240, )),
                                     dtype=mstype.float32),
                              name="type0_idt1")
        self.idt2 = Parameter(Tensor(np.random.normal(0.1, 0.001, (240, )),
                                     dtype=mstype.float32),
                              name="type0_idt2")
        self.idt3 = Parameter(Tensor(np.random.normal(0.1, 0.001, (240, )),
                                     dtype=mstype.float32),
                              name="type1_idt1")
        self.idt4 = Parameter(Tensor(np.random.normal(0.1, 0.001, (240, )),
                                     dtype=mstype.float32),
                              name="type1_idt2")
        self.idt = [self.idt1, self.idt2, self.idt3, self.idt4]
        # dim_descrpt, n_neuron, filter_neuron (and type_bias_ae, n_axis_neuron
        # below) are defined elsewhere in the source module.
        self.neuron = [dim_descrpt] + n_neuron
        self.par = [1] + filter_neuron
        self.process = Processing()
        fc = []
        for i in range(3):
            fc.append(
                nn.Dense(self.par[i],
                         self.par[i + 1],
                         weight_init=Tensor(np.random.normal(
                             0.0, 1.0 / np.sqrt(self.par[i] + self.par[i + 1]),
                             (self.par[i + 1], self.par[i])),
                                            dtype=mstype.float32),
                         bias_init=Tensor(np.random.normal(
                             0.0, 1.0, (self.par[i + 1], )),
                                          dtype=mstype.float32)))
        for i in range(1, 3):
            fc.append(
                nn.Dense(self.par[i],
                         self.par[i + 1],
                         weight_init=Tensor(np.random.normal(
                             0.0, 1.0 / np.sqrt(self.par[i] + self.par[i + 1]),
                             (self.par[i + 1], self.par[i])),
                                            dtype=mstype.float32),
                         bias_init=Tensor(np.random.normal(
                             0.0, 1.0, (self.par[i + 1], )),
                                          dtype=mstype.float32)))
        for i in range(3):
            fc.append(
                nn.Dense(self.par[i],
                         self.par[i + 1],
                         weight_init=Tensor(np.random.normal(
                             0.0, 1.0 / np.sqrt(self.par[i] + self.par[i + 1]),
                             (self.par[i + 1], self.par[i])),
                                            dtype=mstype.float32),
                         bias_init=Tensor(np.random.normal(
                             0.0, 1.0, (self.par[i + 1], )),
                                          dtype=mstype.float32)))
        for i in range(1, 3):
            fc.append(
                nn.Dense(self.par[i],
                         self.par[i + 1],
                         weight_init=Tensor(np.random.normal(
                             0.0, 1.0 / np.sqrt(self.par[i] + self.par[i + 1]),
                             (self.par[i + 1], self.par[i])),
                                            dtype=mstype.float32),
                         bias_init=Tensor(np.random.normal(
                             0.0, 1.0, (self.par[i + 1], )),
                                          dtype=mstype.float32)))
        self.fc = nn.CellList(fc)
        self.fc0 = deepcopy(self.fc)
        self.fc2 = [self.fc, self.fc0]

        fc = []
        for i in range(3):
            fc.append(
                nn.Dense(
                    self.neuron[i],
                    self.neuron[i + 1],
                    weight_init=Tensor(np.random.normal(
                        0.0,
                        1.0 / np.sqrt(self.neuron[i] + self.neuron[i + 1]),
                        (self.neuron[i + 1], self.neuron[i])),
                                       dtype=mstype.float32),
                    bias_init=Tensor(np.random.normal(0.0, 1.0,
                                                      (self.neuron[i + 1], )),
                                     dtype=mstype.float32)))
        fc.append(
            nn.Dense(240,
                     1,
                     weight_init=Tensor(np.random.normal(
                         0.0, 1.0 / np.sqrt(240 + 1), (1, 240)),
                                        dtype=mstype.float32),
                     bias_init=Tensor(np.random.normal(type_bias_ae[0], 1.0,
                                                       (1, )),
                                      dtype=mstype.float32)))
        for i in range(3):
            fc.append(
                nn.Dense(
                    self.neuron[i],
                    self.neuron[i + 1],
                    weight_init=Tensor(np.random.normal(
                        0.0,
                        1.0 / np.sqrt(self.neuron[i] + self.neuron[i + 1]),
                        (self.neuron[i + 1], self.neuron[i])),
                                       dtype=mstype.float32),
                    bias_init=Tensor(np.random.normal(0.0, 1.0,
                                                      (self.neuron[i + 1], )),
                                     dtype=mstype.float32)))
        fc.append(
            nn.Dense(240,
                     1,
                     weight_init=Tensor(np.random.normal(
                         0.0, 1.0 / np.sqrt(240 + 1), (1, 240)),
                                        dtype=mstype.float32),
                     bias_init=Tensor(np.random.normal(type_bias_ae[1], 1.0,
                                                       (1, )),
                                      dtype=mstype.float32)))
        self.fc1 = nn.CellList(fc)

        xyz_A = np.vstack((np.identity(46), np.zeros([92, 46])))
        self.xyz_A = Tensor(np.reshape(xyz_A, (1, 138, 46)))
        xyz_B = np.vstack((np.zeros([46, 92]), np.identity(92)))
        self.xyz_B = Tensor(np.reshape(xyz_B, (1, 138, 92)))

        xyz_2 = np.vstack(
            (np.identity(n_axis_neuron),
             np.zeros([self.par[-1] - n_axis_neuron, n_axis_neuron])))
        self.xyz_2 = Tensor(xyz_2)
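All of the Dense layers above draw weights from N(0, 1/√(fan_in + fan_out)), a Xavier-style scale. A standalone NumPy check with illustrative sizes:

import numpy as np

fan_in, fan_out = 240, 120
w = np.random.normal(0.0, 1.0 / np.sqrt(fan_in + fan_out), (fan_out, fan_in))
print(round(w.std(), 4))  # ~0.0527 == 1 / sqrt(360)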