Example #1
0
    def construct(self, x):
        """Fold patch tensor ``x`` back into a single feature map by overlap-add.

        Each kernel-sized block of the folded input is zero-padded to the full
        output size (ZerosLike + Concat on axes 2 and 3) and accumulated into
        ``fold_x`` by addition, because in-place slice assignment is not
        available here.  Blocks are placed ``self.stride`` apart, so adjacent
        blocks may overlap and their overlapping regions sum.

        NOTE(review): assumes ``self.fold(x)`` yields an NCHW tensor tiled by
        ``self.kernel_size`` blocks — confirm against the caller.
        """
        cc_2 = P.Concat(axis=2)
        cc_3 = P.Concat(axis=3)
        zeroslike = P.ZerosLike()
        if self.output_shape[0] == -1:
            # No explicit output shape: derive a square block grid from the
            # folded input's height (H is reused for both spatial axes).
            large_x = self.fold(x)
            N, C, H, _ = large_x.shape
            leftup_idx = []
            for i in range(0, H, self.kernel_size[0]):
                leftup_idx.append(i)
            NumBlock = len(leftup_idx)
            # Output canvas: blocks are laid out `stride` apart instead of
            # `kernel_size` apart.
            fold_x = P.Zeros()(
                (N, C, (NumBlock - 1) * self.stride + self.kernel_size[0],
                 (NumBlock - 1) * self.stride + self.kernel_size[0]),
                mstype.float32)

            for i in range(NumBlock):
                for j in range(NumBlock):
                    # Destination corner advances by stride; source corner by
                    # kernel size.
                    fold_i = i * self.stride
                    fold_j = j * self.stride
                    org_i = leftup_idx[i]
                    org_j = leftup_idx[j]
                    fills = large_x[:, :, org_i:org_i + self.kernel_size[0],
                                    org_j:org_j + self.kernel_size[1]]
                    # Surround `fills` with zeros on all four sides so it is
                    # output-sized, then add it into the accumulator.
                    fold_x += cc_3((cc_3((zeroslike(fold_x[:, :, :, :fold_j]), cc_2((cc_2((zeroslike(fold_x[:, :, :fold_i, fold_j:fold_j + self.kernel_size[1]]), fills)), zeroslike(fold_x[:, :, fold_i + self.kernel_size[0]:, fold_j:fold_j + self.kernel_size[1]]))))), zeroslike(fold_x[:, :, :, fold_j + self.kernel_size[1]:])))  #pylint: disable=line-too-long
            y = fold_x
        else:
            # Explicit output shape: block counts follow the usual
            # (size - kernel) / stride + 1 sliding-window formula.
            NumBlock_x = int((self.output_shape[0] - self.kernel_size[0]) /
                             self.stride + 1)
            NumBlock_y = int((self.output_shape[1] - self.kernel_size[1]) /
                             self.stride + 1)
            large_shape = [
                NumBlock_x * self.kernel_size[0],
                NumBlock_y * self.kernel_size[1]
            ]
            # Rebuild the fold op for the non-overlapping intermediate layout.
            self.fold = _fold_(self.kernel_size, large_shape)
            large_x = self.fold(x)
            N, C, H, _ = large_x.shape
            leftup_idx_x = []
            leftup_idx_y = []
            for i in range(NumBlock_x):
                leftup_idx_x.append(i * self.kernel_size[0])
            for i in range(NumBlock_y):
                leftup_idx_y.append(i * self.kernel_size[1])
            fold_x = P.Zeros()(
                (N, C, (NumBlock_x - 1) * self.stride + self.kernel_size[0],
                 (NumBlock_y - 1) * self.stride + self.kernel_size[1]),
                mstype.float32)
            for i in range(NumBlock_x):
                for j in range(NumBlock_y):
                    # Same zero-pad-and-add placement as the branch above.
                    fold_i = i * self.stride
                    fold_j = j * self.stride
                    org_i = leftup_idx_x[i]
                    org_j = leftup_idx_y[j]
                    fills = large_x[:, :, org_i:org_i + self.kernel_size[0],
                                    org_j:org_j + self.kernel_size[1]]
                    fold_x += cc_3((cc_3((zeroslike(fold_x[:, :, :, :fold_j]), cc_2((cc_2((zeroslike(fold_x[:, :, :fold_i, fold_j:fold_j + self.kernel_size[1]]), fills)), zeroslike(fold_x[:, :, fold_i + self.kernel_size[0]:, fold_j:fold_j + self.kernel_size[1]]))))), zeroslike(fold_x[:, :, :, fold_j + self.kernel_size[1]:])))  #pylint: disable=line-too-long
            y = fold_x
        return y
Example #2
0
 def construct(self, x):
     """Gather stride-spaced patches of ``x`` into a contiguous block grid,
     then apply ``self.unfold`` to the result.

     A ``kernel_size``-sized patch is read at every ``stride`` step of the
     (N, C, H, W) input and written at ``kernel_size`` spacing into
     ``unf_x``.  Each patch is zero-padded to the full canvas with
     ZerosLike + Concat and merged by addition, since in-place slice
     assignment is unavailable here.  If ``stride < kernel_size`` the source
     patches overlap; the destination slots never do.
     """
     N, C, H, W = x.shape
     leftup_idx_x = []
     leftup_idx_y = []
     # Number of whole strides along each spatial axis.
     nh = int(H / self.stride)
     nw = int(W / self.stride)
     for i in range(nh):
         leftup_idx_x.append(i * self.stride)
     for i in range(nw):
         leftup_idx_y.append(i * self.stride)
     NumBlock_x = len(leftup_idx_x)
     NumBlock_y = len(leftup_idx_y)
     zeroslike = P.ZerosLike()
     cc_2 = P.Concat(axis=2)
     cc_3 = P.Concat(axis=3)
     # Canvas holding every patch side by side, kernel_size apart.
     unf_x = P.Zeros()((N, C, NumBlock_x * self.kernel_size,
                        NumBlock_y * self.kernel_size), mstype.float32)
     N, C, H, W = unf_x.shape  # rebind to the canvas dimensions
     for i in range(NumBlock_x):
         for j in range(NumBlock_y):
             # Destination corner advances by kernel size; source by stride.
             unf_i = i * self.kernel_size
             unf_j = j * self.kernel_size
             org_i = leftup_idx_x[i]
             org_j = leftup_idx_y[j]
             fills = x[:, :, org_i:org_i + self.kernel_size,
                       org_j:org_j + self.kernel_size]
             # Surround `fills` with zeros on all four sides so it is
             # canvas-sized, then add it into the accumulator.
             unf_x += cc_3((cc_3((zeroslike(unf_x[:, :, :, :unf_j]), cc_2((cc_2(
                 (zeroslike(unf_x[:, :, :unf_i, unf_j:unf_j + self.kernel_size]), fills)), zeroslike(
                     unf_x[:, :, unf_i + self.kernel_size:, unf_j:unf_j + self.kernel_size]))))),
                            zeroslike(unf_x[:, :, :, unf_j + self.kernel_size:])))
     y = self.unfold(unf_x)
     return y
Example #3
0
 def __init__(self, *args, **kwargs):
     """Forward all arguments to the base Conv1d and pre-create the
     MindSpore primitives this layer uses."""
     super(Conv1d, self).__init__(*args, **kwargs)
     self.clear_buffer()
     self._linearized_weight = None
     # Layout/shape helpers.
     self.reshape_op = P.Reshape()
     self.transpose_op = P.Transpose()
     self.squeeze_op = P.Squeeze(-2)
     # Buffer construction and the linearized matmul path.
     self.zeros = P.Zeros()
     self.concat_op = P.Concat(axis=1)
     self.matmul = P.MatMul(transpose_b=True)
     self.bias_add = P.BiasAdd()
     # Parameter accessors, resolved later.
     self.get_weight = None
     self.get_bias = None
Example #4
0
    def bprop(x, y, z, out, dout):
        """Gradient rule: the diagonal of ``dout`` flows to ``dy``, the
        off-diagonal part to ``dx``, and ``z`` receives a zero gradient."""
        shape_x = F.shape(x)
        # Batch dims of x plus the length of its (possibly rectangular)
        # diagonal.
        diag_shape = shape_x[:-2] + (_get_min(shape_x[-2:]),)

        dout_dtype = get_dtype(dout)
        helper = _get_matrix_diag_part_assist(F.shape(dout), dout_dtype)
        # Zero out the diagonal of dout to form dx; dy is that diagonal.
        zero_diag = P.Zeros()(diag_shape, dout_dtype)
        dx = inner.MatrixSetDiag()(dout, zero_diag, helper)
        dy = inner.MatrixDiagPart()(dout, helper)
        return dx, dy, zeros_like(z)
Example #5
0
    def __init__(
        self,
        out_channels=256,
        layers=20,
        stacks=2,
        residual_channels=512,
        gate_channels=512,
        skip_out_channels=512,
        kernel_size=3,
        dropout=1 - 0.95,
        cin_channels=-1,
        gin_channels=-1,
        n_speakers=None,
        upsample_conditional_features=False,
        upsample_net="ConvInUpsampleNetwork",
        upsample_params=None,
        scalar_input=False,
        use_speaker_embedding=False,
        output_distribution="Logistic",
        cin_pad=0,
    ):
        """WaveNet network: a first 1x1 conv, ``layers`` dilated residual
        GLU blocks arranged in ``stacks`` stacks, and a ReLU/1x1-conv head
        over the skip connections.

        Args:
            out_channels: channels of the output head (also input channels
                when ``scalar_input`` is False).
            layers: total number of residual blocks; must be a multiple of
                ``stacks``.
            stacks: number of dilation stacks (dilation resets per stack).
            residual_channels, gate_channels, skip_out_channels,
            kernel_size, dropout: forwarded to ``ResidualConv1dGLU``.
            cin_channels, gin_channels: local/global conditioning sizes.
            n_speakers: required when ``gin_channels > 0`` and
                ``use_speaker_embedding`` is True.
            upsample_conditional_features / upsample_net / upsample_params:
                optional conditioning-feature upsampling network.
            scalar_input: if True the first conv takes 1 input channel.
            output_distribution: name of the output distribution.
            cin_pad: unused here; kept for interface compatibility.

        Raises:
            ValueError: if ``layers`` is not a multiple of ``stacks``, or if
                speaker embedding is requested without ``n_speakers``.
        """
        super(WaveNet, self).__init__()
        # Validate early with an explicit raise: `assert` would be stripped
        # under `python -O`.
        if layers % stacks != 0:
            raise ValueError("layers must be a multiple of stacks")
        layers_per_stack = layers // stacks

        # Primitive ops. (The original assigned transpose_op twice; once is
        # enough.)
        self.transpose_op = P.Transpose()
        self.softmax = P.Softmax(axis=1)
        self.reshape_op = P.Reshape()
        self.zeros_op = P.Zeros()
        self.ones_op = P.Ones()
        self.relu_op = P.ReLU()
        self.squeeze_op = P.Squeeze()
        self.expandim_op = P.ExpandDims()
        self.tile_op = P.Tile()

        self.scalar_input = scalar_input
        self.out_channels = out_channels
        self.cin_channels = cin_channels
        self.output_distribution = output_distribution
        # Attribute name ("fack") kept as-is for compatibility with callers.
        self.fack_data = P.Zeros()

        # Input projection: scalar input has 1 channel, otherwise one
        # channel per output class.
        if scalar_input:
            self.first_conv = Conv1d1x1(1, residual_channels)
        else:
            self.first_conv = Conv1d1x1(out_channels, residual_channels)

        # Dilated residual blocks; dilation doubles within each stack.
        conv_layers = []
        for layer in range(layers):
            dilation = 2**(layer % layers_per_stack)
            conv = ResidualConv1dGLU(residual_channels,
                                     gate_channels,
                                     kernel_size=kernel_size,
                                     skip_out_channels=skip_out_channels,
                                     bias=True,
                                     dropout=dropout,
                                     dilation=dilation,
                                     cin_channels=cin_channels,
                                     gin_channels=gin_channels)
            conv_layers.append(conv)
        self.conv_layers = nn.CellList(conv_layers)

        # Output head over the summed skip connections.
        self.last_conv_layers = nn.CellList([
            nn.ReLU(),
            Conv1d1x1(skip_out_channels, skip_out_channels),
            nn.ReLU(),
            Conv1d1x1(skip_out_channels, out_channels)
        ])

        if gin_channels > 0 and use_speaker_embedding:
            if n_speakers is None:
                raise ValueError(
                    "n_speakers is required when use_speaker_embedding "
                    "is enabled")
            self.embed_speakers = Embedding(n_speakers,
                                            gin_channels,
                                            padding_idx=None,
                                            std=0.1)
        else:
            self.embed_speakers = None

        if upsample_conditional_features:
            # Guard the None default so **kwargs expansion cannot raise
            # TypeError.
            self.upsample_net = getattr(
                upsample, upsample_net)(**(upsample_params or {}))
        else:
            self.upsample_net = None

        # Scale factor for the skip-connection sum.
        self.factor = math.sqrt(1.0 / len(self.conv_layers))
Example #6
0
    def __init__(
        self,
        dim_atom_embed,
        num_rbf,
        n_heads=8,
        activation=Swish(),
        max_cycles=10,
        time_embedding=0,
        use_pondering=True,
        fixed_cycles=False,
        use_filter=True,
        inside_filter=None,
        act_threshold=0.9,
        fixed_neigh=False,
    ):
        """Multi-head attention interaction block with optional adaptive
        computation time (ACT / pondering).

        Args:
            dim_atom_embed: atom embedding width; must be divisible by
                ``n_heads``.
            num_rbf: radial-basis feature width (input to the filter layer).
            n_heads: number of attention heads.
            activation: unused in this constructor body; kept for interface
                compatibility.
            max_cycles: maximum ACT cycles; pondering is built only when > 1.
            time_embedding: stored for later use by the cell.
            use_pondering: build a Pondering network to drive ACT halting.
            fixed_cycles: if True, always run a fixed number of cycles.
            use_filter: build a Dense filter from ``num_rbf`` features.
            inside_filter: unused; kept for interface compatibility.
            act_threshold: ACT halting threshold.
            fixed_neigh: forwarded to the base class.

        Raises:
            ValueError: if ``dim_atom_embed`` is not divisible by
                ``n_heads``, or if flexible cycles are requested without a
                pondering network.
        """
        super().__init__(gather_dim=dim_atom_embed, fixed_neigh=fixed_neigh)
        # Original message said "cannot be divisible", inverting the actual
        # requirement; state it correctly.
        if dim_atom_embed % n_heads != 0:
            raise ValueError('The term "dim_atom_embed" must be divisible ' +
                             'by the term "n_heads" in AirNetIneteraction! ')

        self.n_heads = n_heads
        self.max_cycles = max_cycles
        self.dim_atom_embed = dim_atom_embed
        self.num_rbf = num_rbf
        self.time_embedding = time_embedding

        # Attribute name ("flexable_cycels") kept as-is for compatibility.
        self.flexable_cycels = not fixed_cycles

        self.use_filter = use_filter
        if self.use_filter:
            # Plain linear filter over the RBF features (no bias activation).
            self.filter = Dense(num_rbf,
                                dim_atom_embed,
                                has_bias=True,
                                activation=None)

        self.positional_embedding = PositionalEmbedding(dim_atom_embed)
        self.multi_head_attention = MultiheadAttention(dim_atom_embed, n_heads)

        self.act_threshold = act_threshold
        self.act_epsilon = 1.0 - act_threshold

        self.use_pondering = use_pondering
        self.pondering = None
        self.act_weight = None
        if self.max_cycles > 1:
            if self.use_pondering:
                self.pondering = Pondering(dim_atom_embed * 3, bias_const=3)
                self.act_weight = ACTWeight(self.act_threshold)
            else:
                if self.flexable_cycels:
                    raise ValueError(
                        'The term "fixed_cycles" must be True ' +
                        'when the pondering network is None in AirNetIneteraction! '
                    )
        # Uniform per-cycle weight used when cycles are fixed.
        self.fixed_weight = Tensor(1.0 / max_cycles, ms.float32)

        self.max = P.Maximum()
        self.min = P.Minimum()
        self.concat = P.Concat(-1)
        self.pack = P.Pack()
        self.reducesum = P.ReduceSum()
        self.squeeze = P.Squeeze(-1)
        self.ones_like = P.OnesLike()
        self.zeros_like = P.ZerosLike()
        self.zeros = P.Zeros()
Example #7
0
def test_zeros_1():
    """Zeros() called with an int size yields a zero-filled 1-D int32 tensor."""
    result = P.Zeros()(2, mstype.int32).asnumpy()
    assert result.shape == (2,)
    assert np.sum(result) == 0