Example #1
    def __init__(self, dim, n_heads):
        super().__init__()

        # h: number of attention heads
        self.n_heads = n_heads

        # v = V / h: feature size per attention head
        self.size_per_head = dim // n_heads
        scores_mul = 1.0 / np.sqrt(float(self.size_per_head))
        self.scores_mul = ms.Tensor(scores_mul, ms.float32)

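        # constant int32 ones with shape (1, 1, n_heads, 1, 1)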
        self.exones = P.Ones()((1, 1, n_heads, 1, 1), ms.int32)

        # shape = (h, v)
        self.reshape_tail = (self.n_heads, self.size_per_head)

        self.output = Dense(dim, dim, has_bias=False)

        self.mul = P.Mul()
        self.div = P.Div()
        self.softmax = P.Softmax()
        self.bmm = P.BatchMatMul()
        self.bmmt = P.BatchMatMul(transpose_b=True)
        self.squeeze = P.Squeeze(-2)
        self.reducesum = P.ReduceSum(keep_dims=True)

        self.transpose = P.Transpose()
        self.trans_shape = (0, 1, 3, 2, 4)
Example #2
    def __init__(self, dim, fixed_neigh=False):
        super().__init__()
        self.fixed_neigh = fixed_neigh

        self.broad_ones = P.Ones()((1, 1, dim), ms.int32)

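        # GatherD is only needed when the neighbor indices are not fixed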
        if fixed_neigh:
            self.gatherd = None
        else:
            self.gatherd = P.GatherD()
Example #3
    def __init__(self, tot_atoms):
        super().__init__()
        # tot_atoms: A
        # tot_neigh: N = A - 1
        tot_neigh = tot_atoms - 1
        arange = nn.Range(tot_atoms)
        nrange = nn.Range(tot_neigh)

        self.ones = P.Ones()
        self.aones = self.ones((tot_atoms), ms.int32)
        self.nones = self.ones((tot_neigh), ms.int32)

        # neighbors for no connection (A*N)
        # [[0,0,...,0],
        #  [1,1,...,1],
        #  ...........,
        #  [N,N,...,N]]
        self.nnc = F.expand_dims(arange(), -1) * self.nones
        # copy of the index range (A*N)
        # [[0,1,...,N-1],
        #  [0,1,...,N-1],
        #  ...........,
        #  [0,1,...,N-1]]
        crange = self.ones((tot_atoms, 1), ms.int32) * nrange()
        # neighbors for full connection (A*N)
        # [[1,2,3,...,N],
        #  [0,2,3,...,N],
        #  [0,1,3,...,N],
        #  .............,
        #  [0,1,2,...,N-1]]
        self.nfc = crange + F.cast(self.nnc <= crange, ms.int32)

        crange1 = crange + 1
        # the matrix for index range (A*N)
        # [[1,2,3,...,N],
        #  [1,2,3,...,N],
        #  [2,2,3,...,N],
        #  [3,3,3,...,N],
        #  .............,
        #  [N,N,N,...,N]]
        self.mat_idx = F.select(crange1 > self.nnc, crange1, self.nnc)
Example #4
    def __init__(self, tot_atoms):
        super().__init__()
        # tot_atoms: A
        # tot_neigh: N = A - 1
        tot_neigh = tot_atoms - 1
        arange = nn.Range(tot_atoms)
        nrange = nn.Range(tot_neigh)

        self.ones = P.Ones()
        self.aones = self.ones((tot_atoms), ms.int32)
        self.nones = self.ones((tot_neigh), ms.int32)
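        # column of ones with shape (A, 1)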
        self.eaones = F.expand_dims(self.aones, -1)

        # neighbors for no connection (A*N)
        # [[0,0,...,0],
        #  [1,1,...,1],
        #  ...........,
        #  [N,N,...,N]]
        self.nnc = F.expand_dims(arange(), -1) * self.nones

        # copy of the index range (A*N)
        # [[0,1,...,N-1],
        #  [0,1,...,N-1],
        #  ...........,
        #  [0,1,...,N-1]]
        exrange = self.ones((tot_atoms, 1), ms.int32) * nrange()

        # neighbors for full connection (A*N)
        # [[1,2,3,...,N],
        #  [0,2,3,...,N],
        #  [0,1,3,...,N],
        #  .............,
        #  [0,1,2,...,N-1]]
        self.nfc = exrange + F.cast(self.nnc <= exrange, ms.int32)

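        # index ranges [0, N) and [1, A)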
        self.ar0 = nn.Range(0, tot_neigh)()
        self.ar1 = nn.Range(1, tot_atoms)()
Example #5
    def __init__(
        self,
        out_channels=256,
        layers=20,
        stacks=2,
        residual_channels=512,
        gate_channels=512,
        skip_out_channels=512,
        kernel_size=3,
        dropout=1 - 0.95,
        cin_channels=-1,
        gin_channels=-1,
        n_speakers=None,
        upsample_conditional_features=False,
        upsample_net="ConvInUpsampleNetwork",
        upsample_params=None,
        scalar_input=False,
        use_speaker_embedding=False,
        output_distribution="Logistic",
        cin_pad=0,
    ):
        super(WaveNet, self).__init__()
        self.transpose_op = P.Transpose()
        self.softmax = P.Softmax(axis=1)
        self.reshape_op = P.Reshape()
        self.zeros_op = P.Zeros()
        self.ones_op = P.Ones()
        self.relu_op = P.ReLU()
        self.squeeze_op = P.Squeeze()
        self.expandim_op = P.ExpandDims()
        self.tile_op = P.Tile()
        self.scalar_input = scalar_input
        self.out_channels = out_channels
        self.cin_channels = cin_channels
        self.output_distribution = output_distribution
        self.fack_data = P.Zeros()
        assert layers % stacks == 0
        layers_per_stack = layers // stacks
        if scalar_input:
            self.first_conv = Conv1d1x1(1, residual_channels)
        else:
            self.first_conv = Conv1d1x1(out_channels, residual_channels)

        conv_layers = []
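        # dilation grows as a power of two within each stack and resets for each new stack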
        for layer in range(layers):
            dilation = 2**(layer % layers_per_stack)
            conv = ResidualConv1dGLU(residual_channels,
                                     gate_channels,
                                     kernel_size=kernel_size,
                                     skip_out_channels=skip_out_channels,
                                     bias=True,
                                     dropout=dropout,
                                     dilation=dilation,
                                     cin_channels=cin_channels,
                                     gin_channels=gin_channels)
            conv_layers.append(conv)
        self.conv_layers = nn.CellList(conv_layers)
        self.last_conv_layers = nn.CellList([
            nn.ReLU(),
            Conv1d1x1(skip_out_channels, skip_out_channels),
            nn.ReLU(),
            Conv1d1x1(skip_out_channels, out_channels)
        ])

        if gin_channels > 0 and use_speaker_embedding:
            assert n_speakers is not None
            self.embed_speakers = Embedding(n_speakers,
                                            gin_channels,
                                            padding_idx=None,
                                            std=0.1)
        else:
            self.embed_speakers = None

        if upsample_conditional_features:
            self.upsample_net = getattr(upsample,
                                        upsample_net)(**upsample_params)
        else:
            self.upsample_net = None

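        # scaling factor: 1 / sqrt(number of residual layers)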
        self.factor = math.sqrt(1.0 / len(self.conv_layers))
Example #6
def test_ones_1():
    ones = P.Ones()
    output = ones(2, mstype.int32)
    assert output.asnumpy().shape == (2,)
    assert np.sum(output.asnumpy()) == 2
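
A minimal, self-contained variant of the check above, written here as a sketch: it assumes the usual MindSpore import paths (from mindspore.ops import operations as P, import mindspore.common.dtype as mstype) and only illustrates that Ones also accepts a tuple shape.

import numpy as np
import mindspore.common.dtype as mstype
from mindspore.ops import operations as P

def test_ones_tuple_shape():
    ones = P.Ones()
    # the shape argument may be a single int (as above) or a tuple of ints
    output = ones((2, 3), mstype.float32)
    assert output.asnumpy().shape == (2, 3)
    assert np.sum(output.asnumpy()) == 6.0
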
Example #7
    def __init__(
        self,
        model,
        scale=1.0,
        shift=0.0,
        max_atoms_num=0,
        aggregate=True,
        average=False,
        atom_types=None,
        full_connect=False,
    ):
        super().__init__()

        self.predict = model
        # dim_atomembedding=model.dim_atomembedding
        self.full_connect = full_connect

        self.scale = scale
        self.shift = shift

        self.aggregate = aggregate
        self.average = average

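        # reduction operators used when aggregating the atomic outputs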
        self.reducesum = P.ReduceSum(keep_dims=False)
        self.molsum = P.ReduceSum(keep_dims=True)
        self.reducemean = P.ReduceMean(keep_dims=False)

        if atom_types is None:
            self.fixed_atoms = False
            self.num_atoms = 0
        else:
            self.fixed_atoms = True
            model._set_fixed_atoms(True)

            if len(atom_types.shape) == 1:
                self.num_atoms = len(atom_types)
            elif len(atom_types.shape) == 2:
                self.num_atoms = len(atom_types[0])

            if self.num_atoms <= 0:
                raise ValueError(
                    "The 'num_atoms' cannot be 0 " +
                    "'atom_types' is not 'None' in MolCalculator!")

            if not isinstance(atom_types, Tensor):
                atom_types = Tensor(atom_types, ms.int32)

            self.atom_types = atom_types

        self.neighbors = None
        self.mask = None
        self.fc_neighbors = None
        if self.full_connect:
            if self.fixed_atoms:
                self.fc_neighbors = Types2FullConnectNeighbors(self.num_atoms)
                self.neighbors = self.fc_neighbors.get_full_neighbors()
            else:
                if max_atoms_num <= 0:
                    raise ValueError(
                        "The 'max_atoms_num' cannot be 0 " +
                        "when the 'full_connect' flag is 'True' and " +
                        "'atom_types' is 'None' in MolCalculator!")
                self.fc_neighbors = Types2FullConnectNeighbors(max_atoms_num)

        if self.fixed_atoms and self.full_connect:
            self.distances = AtomDistances(True)
            model._set_fixed_neighbors()
        else:
            self.distances = AtomDistances(False)

        self.ones = P.Ones()
Example #8
    def __init__(
        self,
        num_atomtypes,
        dim_atomembedding,
        min_rbf_dis,
        max_rbf_dis,
        num_rbf,
        output_dim=1,
        rbf_sigma=None,
        trainable_rbf=False,
        distance_expansion=None,
        cutoff=None,
        cutoff_network=None,
        rescale_rbf=False,
        use_all_interactions=False,
    ):
        super().__init__()
        self.num_atomtypes = num_atomtypes
        self.dim_atomembedding = dim_atomembedding
        self.num_rbf = num_rbf
        self.distance_expansion = distance_expansion
        self.rescale_rbf = rescale_rbf
        self.output_dim = output_dim
        # ~ self.n_interactions=n_interactions

        self.network_name = 'GNN_Model'

        # lookup table that stores an embedding vector of size dim_atomembedding
        # for each of the num_atomtypes element types
        self.embedding = nn.Embedding(num_atomtypes,
                                      dim_atomembedding,
                                      use_one_hot=True,
                                      embedding_table=Normal(1.0))

        self.filter = None

        self.fixed_atoms = False

        # layer for expanding interatomic distances in a basis
        if distance_expansion is not None:
            self.distance_expansion = distance_expansion(
                d_min=min_rbf_dis,
                d_max=max_rbf_dis,
                num_rbf=num_rbf,
                sigma=rbf_sigma,
                trainable=trainable_rbf)
        else:
            self.distance_expansion = None

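        # with no explicit cutoff, the cutoff network falls back to the maximum RBF distance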
        if cutoff_network is None:
            self.cutoff_network = None
            self.cutoff = None
        else:
            if cutoff is None:
                self.cutoff_network = cutoff_network(max_rbf_dis)
                self.cutoff = max_rbf_dis
            else:
                self.cutoff_network = cutoff_network(cutoff)
                self.cutoff = cutoff

        self.interactions = None

        self.readout = None
        self.use_all_interactions = use_all_interactions
        self.gather_interactions = None

        self.debug_fun = None

        self.ones = P.Ones()
Example #9
    def __init__(self, dim):
        super().__init__()
        self.range = nn.Range(dim)
        ones = P.Ones()
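        # int32 vector of ones with length dim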
        self.ones = ones((dim), ms.int32)