Example #1
 # set_strategy is the older MindSpore API name for what later examples call shard
 def __init__(self, strategy1, strategy2, strategy3, strategy4, strategy5, strategy6):
     super().__init__()
     self.matmul1 = P.MatMul().set_strategy(strategy1)
     self.matmul2 = P.MatMul().set_strategy(strategy2)
     self.gelu = P.Gelu().set_strategy(strategy3)
     self.tanh = P.Tanh().set_strategy(strategy4)
     self.softmax = P.Softmax(axis=(0, 1)).set_strategy(strategy5)
     self.logsoftmax = P.LogSoftmax().set_strategy(strategy6)
Example #2
 def __init__(self, strategy1, strategy2, strategy3):
     super().__init__()
     self.matmul1 = P.MatMul().shard(strategy1)
     self.matmul2 = P.MatMul().shard(strategy2)
     self.gelu = P.Gelu().shard(strategy3)
     self.tanh = P.Tanh().shard(strategy3)
     self.softmax = P.Softmax().shard(strategy3)
     self.logsoftmax = P.LogSoftmax().shard(strategy3)
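For context, a hedged sketch of how such strategies might be built; the 8-device count and the `Net` name are illustrative assumptions, not part of the original:

    # Each inner tuple shards one operator input across devices; the
    # split factors along each dimension must multiply to the device count.
    strategy1 = ((2, 4), (4, 1))   # MatMul: lhs split 2x4, rhs split 4x1
    strategy2 = ((2, 4), (4, 1))
    strategy3 = ((8, 1),)          # element-wise ops take a single input
    net = Net(strategy1, strategy2, strategy3)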
Example #3
 def _init_activation(self, act_str):
     act_str = act_str.lower()
     if act_str == "relu":
         act_func = P.ReLU()
     elif act_str == "sigmoid":
         act_func = P.Sigmoid()
     elif act_str == "tanh":
         act_func = P.Tanh()
     else:
         # without this guard, an unknown name leaves act_func unbound
         raise ValueError("activation must be relu, sigmoid or tanh")
     return act_func
Example #4
File: bgcf.py  Project: yrpang/mindspore
    def __init__(self, dataset_argv, architect_argv, activation,
                 neigh_drop_rate, num_user, num_item, input_dim):
        super(BGCF, self).__init__()

        self.user_embed = Parameter(
            initializer("XavierUniform", [num_user, input_dim],
                        dtype=mstype.float32))
        self.item_embed = Parameter(
            initializer("XavierUniform", [num_item, input_dim],
                        dtype=mstype.float32))
        self.cast = P.Cast()
        self.tanh = P.Tanh()
        self.shape = P.Shape()
        self.split = P.Split(0, 2)
        self.gather = P.Gather()
        self.reshape = P.Reshape()
        self.concat_0 = P.Concat(0)
        self.concat_1 = P.Concat(1)

        (self.input_dim, self.num_user, self.num_item) = dataset_argv
        self.layer_dim = architect_argv

        self.gnew_agg_mean = MeanConv(self.input_dim,
                                      self.layer_dim,
                                      activation=activation,
                                      dropout=neigh_drop_rate[1])
        self.gnew_agg_mean.to_float(mstype.float16)

        self.gnew_agg_user = AttenConv(self.input_dim,
                                       self.layer_dim,
                                       dropout=neigh_drop_rate[2])
        self.gnew_agg_user.to_float(mstype.float16)

        self.gnew_agg_item = AttenConv(self.input_dim,
                                       self.layer_dim,
                                       dropout=neigh_drop_rate[2])
        self.gnew_agg_item.to_float(mstype.float16)

        self.user_feature_dim = self.input_dim
        self.item_feature_dim = self.input_dim

        self.final_weight = Parameter(
            initializer("XavierUniform",
                        [self.input_dim * 3, self.input_dim * 3],
                        dtype=mstype.float32))

        self.raw_agg_funcs_user = MeanConv(self.input_dim,
                                           self.layer_dim,
                                           activation=activation,
                                           dropout=neigh_drop_rate[0])
        self.raw_agg_funcs_user.to_float(mstype.float16)

        self.raw_agg_funcs_item = MeanConv(self.input_dim,
                                           self.layer_dim,
                                           activation=activation,
                                           dropout=neigh_drop_rate[0])
        self.raw_agg_funcs_item.to_float(mstype.float16)
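As a quick, hedged illustration of the XavierUniform initializer used for both embedding tables above (the sizes here are made up):

    import mindspore.common.dtype as mstype
    from mindspore import Parameter
    from mindspore.common.initializer import initializer

    # Hypothetical sizes; mirrors self.user_embed / self.item_embed above.
    embed = Parameter(initializer("XavierUniform", [1000, 64], dtype=mstype.float32))
    print(embed.shape)   # (1000, 64)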
Example #5
 def __init__(self):
     super().__init__()
     self.relu = P.ReLU()
     self.sigmoid = P.Sigmoid()
     self.tanh = P.Tanh()
     self.add = P.Add()
     a = np.full((1, ), 5, dtype=np.float32)
     self.a = Parameter(Tensor(a), name="a")
     b = np.full((1, ), 4, dtype=np.float32)
     self.b = Parameter(Tensor(b), name="b")
     c = np.full((1, ), 7, dtype=np.float32)
     self.c = Parameter(Tensor(c), name="c")
Example #6
File: seq2seq.py  Project: yrpang/mindspore
 def __init__(self, config):
     super(Attention, self).__init__()
     self.text_len = config.max_length
     self.attn = nn.Dense(in_channels=config.hidden_size * 3,
                          out_channels=config.hidden_size).to_float(
                              mstype.float16)
     self.fc = nn.Dense(config.hidden_size, 1,
                        has_bias=False).to_float(mstype.float16)
     self.expandims = P.ExpandDims()
     self.tanh = P.Tanh()
     self.softmax = P.Softmax()
     self.tile = P.Tile()
     self.transpose = P.Transpose()
     self.concat = P.Concat(axis=2)
     self.squeeze = P.Squeeze(axis=2)
     self.cast = P.Cast()
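The operator set above suggests standard additive attention: the decoder state is tiled across time, concatenated with the bidirectional encoder outputs (2 * hidden, which explains the 3 * hidden Dense input), passed through tanh, scored by the bias-free fc, and normalized with softmax. A hedged NumPy sketch with made-up shapes; the original construct is not shown here:

    import numpy as np

    def softmax(x, axis=-1):
        e = np.exp(x - x.max(axis=axis, keepdims=True))
        return e / e.sum(axis=axis, keepdims=True)

    B, T, H = 2, 5, 8                        # batch, text_len, hidden_size
    hidden = np.random.randn(B, 1, H)        # decoder state
    enc_out = np.random.randn(B, T, 2 * H)   # bidirectional encoder outputs
    feat = np.concatenate([np.tile(hidden, (1, T, 1)), enc_out], axis=2)
    W = np.random.randn(3 * H, H)            # plays the role of self.attn
    v = np.random.randn(H, 1)                # plays the role of self.fc
    score = softmax((np.tanh(feat @ W) @ v).squeeze(2))  # shape (B, T)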
Example #7
File: seq2seq.py  Project: yrpang/mindspore
 def __init__(self, config, is_training=True):
     super(Encoder, self).__init__()
     self.hidden_size = config.hidden_size
     self.vocab_size = config.src_vocab_size
     self.embedding_size = config.encoder_embedding_size
     self.embedding = nn.Embedding(self.vocab_size, self.embedding_size)
     self.rnn = BidirectionGRU(config, is_training=is_training).to_float(
         mstype.float16)
     self.fc = nn.Dense(2 * self.hidden_size,
                        self.hidden_size).to_float(mstype.float16)
     self.shape = P.Shape()
     self.transpose = P.Transpose()
     self.p = P.Print()
     self.cast = P.Cast()
     self.text_len = config.max_length
     self.squeeze = P.Squeeze(axis=0)
     self.tanh = P.Tanh()
Example #8
 def __init__(self, config, is_training=True):
     super(Encoder, self).__init__()
     self.hidden_size = config.hidden_size
     self.vocab_size = config.src_vocab_size
     self.embedding_size = config.encoder_embedding_size
     self.embedding = nn.Embedding(self.vocab_size, self.embedding_size)
     self.rnn = GRU(input_size=self.embedding_size,
                    hidden_size=self.hidden_size,
                    bidirectional=True).to_float(config.compute_type)
     self.fc = nn.Dense(2 * self.hidden_size,
                        self.hidden_size).to_float(config.compute_type)
     self.shape = P.Shape()
     self.transpose = P.Transpose()
     self.p = P.Print()
     self.cast = P.Cast()
     self.text_len = config.max_length
     self.squeeze = P.Squeeze(axis=0)
     self.tanh = P.Tanh()
     self.concat = P.Concat(2)
     self.dtype = config.dtype
Example #9
 def __init__(self,
              feature_in_dim,
              feature_out_dim,
              activation,
              dropout=0.2):
     super(MeanConv, self).__init__()
     self.out_weight = Parameter(
         initializer("XavierUniform", [feature_in_dim * 2, feature_out_dim],
                     dtype=mstype.float32))
     if activation == "tanh":
         self.act = P.Tanh()
     elif activation == "relu":
         self.act = P.ReLU()
     else:
         raise ValueError("activation should be tanh or relu")
     self.cast = P.Cast()
     self.matmul = P.MatMul()
     self.concat = P.Concat(axis=1)
     self.reduce_mean = P.ReduceMean(keep_dims=False)
     self.dropout = nn.Dropout(keep_prob=1 - dropout)
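A hedged instantiation with illustrative dimensions (Example #4 constructs MeanConv the same way):

    conv = MeanConv(feature_in_dim=64, feature_out_dim=64,
                    activation="tanh", dropout=0.2)
    # Any other activation string raises the ValueError above.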
Example #10
    def __init__(self, residual_channels=None, gate_channels=None, kernel_size=None, skip_out_channels=None, bias=True,
                 dropout=1 - 0.95, dilation=1, cin_channels=-1, gin_channels=-1, padding=None, causal=True):
        super(ResidualConv1dGLU, self).__init__()
        self.dropout = dropout
        self.dropout_op = nn.Dropout(keep_prob=1. - self.dropout)
        self.eval_split_op = P.Split(axis=-1, output_num=2)
        self.train_split_op = P.Split(axis=1, output_num=2)
        self.tanh = P.Tanh()
        self.sigmoid = P.Sigmoid()
        self.mul = P.Mul()
        self.add = P.TensorAdd()

        if skip_out_channels is None:
            skip_out_channels = residual_channels
        if padding is None:
            if causal:
                padding = (kernel_size - 1) * dilation
            else:
                padding = (kernel_size - 1) // 2 * dilation
        self.causal = causal

        self.conv = Conv1d(residual_channels, gate_channels, kernel_size, pad_mode='pad',
                           padding=padding, dilation=dilation, has_bias=bias)

        # local conditioning
        if cin_channels > 0:
            self.conv1x1c = Conv1d1x1(cin_channels, gate_channels, has_bias=False)
        else:
            self.conv1x1c = None

        # global conditioning
        if gin_channels > 0:
            self.conv1x1g = Conv1d(gin_channels, gate_channels, has_bias=False, kernel_size=1, dilation=1)
        else:
            self.conv1x1g = None

        gate_out_channels = gate_channels // 2
        self.conv1x1_out = Conv1d1x1(gate_out_channels, residual_channels, has_bias=bias)
        self.conv1x1_skip = Conv1d1x1(gate_out_channels, skip_out_channels, has_bias=bias)
        self.factor = math.sqrt(0.5)
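The split/tanh/sigmoid/mul chain above is the WaveNet-style gated activation unit, tanh(a) * sigmoid(b) over the two halves of the conv output. A hedged NumPy sketch with made-up shapes:

    import numpy as np

    x = np.random.randn(2, 8, 100)    # (batch, gate_channels, time)
    a, b = np.split(x, 2, axis=1)     # mirrors self.train_split_op
    gated = np.tanh(a) * (1.0 / (1.0 + np.exp(-b)))
    print(gated.shape)                # (2, 4, 100)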
Example #11
    def __init__(self, block, layer_nums, in_channels, channels, out_channels,
                 strides, num_classes, is_train):
        super(ResNet, self).__init__()

        if not len(layer_nums) == len(in_channels) == len(out_channels) == 4:
            raise ValueError(
                "the lengths of layer_nums, in_channels and out_channels must all be 4!"
            )

        self.ha3 = HardAttn(2048)
        self.is_train = is_train
        self.conv1 = _conv7x7(3, 64, stride=2)
        self.bn1 = _bn(64)
        self.relu = nn.ReLU()
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, pad_mode="same")

        self.layer1 = self._make_layer(block,
                                       layer_nums[0],
                                       in_channel=in_channels[0],
                                       channel=channels[0],
                                       out_channel=out_channels[0],
                                       stride=strides[0])
        self.layer2 = self._make_layer(block,
                                       layer_nums[1],
                                       in_channel=in_channels[1],
                                       channel=channels[1],
                                       out_channel=out_channels[1],
                                       stride=strides[1])
        self.layer3 = self._make_layer(block,
                                       layer_nums[2],
                                       in_channel=in_channels[2],
                                       channel=channels[2],
                                       out_channel=out_channels[2],
                                       stride=strides[2])
        self.layer4 = self._make_layer(block,
                                       layer_nums[3],
                                       in_channel=in_channels[3],
                                       channel=channels[3],
                                       out_channel=out_channels[3],
                                       stride=strides[3])

        self.max = P.ReduceMax(keep_dims=True)
        self.flatten = nn.Flatten()
        self.global_bn = _bn2_kaiming(out_channels[3])
        self.partial_bn = _bn2_kaiming(out_channels[3])
        normal = Normal(0.001)
        self.global_fc = nn.Dense(out_channels[3],
                                  num_classes,
                                  has_bias=False,
                                  weight_init=normal,
                                  bias_init='zeros')
        self.partial_fc = nn.Dense(out_channels[3],
                                   num_classes,
                                   has_bias=False,
                                   weight_init=normal,
                                   bias_init='zeros')
        self.theta_0 = Tensor(np.zeros((128, 4)), mindspore.float32)
        self.theta_6 = Tensor(np.zeros((128, 4)) + 0.6, mindspore.float32)
        self.STN = STN(128, 128)
        self.concat = P.Concat(axis=1)
        self.shape = P.Shape()
        self.tanh = P.Tanh()
        self.slice = P.Slice()
        self.split = P.Split(1, 4)
Example #12
 def __init__(self, strategy1, strategy2, strategy3):
     super().__init__()
     self.matmul1 = P.MatMul().shard(strategy1)
     self.matmul2 = P.MatMul().shard(strategy2)
     self.tanh = P.Tanh().shard(strategy3)
Example #13
 def __init__(self):
     super(TanhNet, self).__init__()
     self.tanh = P.Tanh()
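A hedged usage sketch, assuming PyNative mode so the primitive can be called directly:

    import numpy as np
    from mindspore import Tensor, context

    context.set_context(mode=context.PYNATIVE_MODE)
    x = Tensor(np.array([-2.0, 0.0, 2.0], dtype=np.float32))
    print(P.Tanh()(x))   # [-0.9640276  0.         0.9640276]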
Example #14
     'desc_bprop': [[1, 3, 3, 3]]}),
 ('BiasAddGrad', {
     'block': G.BiasAddGrad(),
     'desc_inputs': [[1, 3, 3, 3]],
     'skip': ['backward']}),
 ('Gelu', {
     'block': P.Gelu(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
 ('GeluGrad', {
     'block': G.GeluGrad(),
     'desc_inputs': [[2, 2], [2, 2], [2, 2]],
     'desc_bprop': [[2, 2]],
     'skip': ['backward']}),
 ('Tanh', {
     'block': P.Tanh(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
 ('TanhGrad', {
     'block': G.TanhGrad(),
     'desc_inputs': [[1, 3, 4, 4], [1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]],
     'skip': ['backward']}),
 ('ReLU', {
     'block': P.ReLU(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
 ('ReLU6', {
     'block': P.ReLU6(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
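These descriptors are consumed by a test harness that is not shown here; a hedged manual equivalent of the 'Tanh' entry, building a random input from its 'desc_inputs' shape:

    import numpy as np
    from mindspore import Tensor

    block = P.Tanh()
    x = Tensor(np.random.rand(1, 3, 4, 4).astype(np.float32))
    assert block(x).shape == (1, 3, 4, 4)   # Tanh preserves shape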
Example #15
    def __init__(self, weight, vocab_size, cell, batch_size):
        super(textrcnn, self).__init__()
        self.num_hiddens = 512
        self.embed_size = 300
        self.num_classes = 2
        self.batch_size = batch_size
        k = (1 / self.num_hiddens)**0.5

        self.embedding = nn.Embedding(vocab_size,
                                      self.embed_size,
                                      embedding_table=weight)
        self.embedding.embedding_table.requires_grad = False
        self.cell = cell

        self.cast = P.Cast()

        self.h1 = Tensor(
            np.zeros(shape=(self.batch_size,
                            self.num_hiddens)).astype(np.float16))
        self.c1 = Tensor(
            np.zeros(shape=(self.batch_size,
                            self.num_hiddens)).astype(np.float16))

        if cell == "lstm":
            self.lstm = P.DynamicRNN(forget_bias=0.0)
            self.w1_fw = Parameter(np.random.uniform(
                -k, k, (self.embed_size + self.num_hiddens,
                        4 * self.num_hiddens)).astype(np.float16),
                                   name="w1_fw")
            self.b1_fw = Parameter(np.random.uniform(
                -k, k, (4 * self.num_hiddens)).astype(np.float16),
                                   name="b1_fw")
            self.w1_bw = Parameter(np.random.uniform(
                -k, k, (self.embed_size + self.num_hiddens,
                        4 * self.num_hiddens)).astype(np.float16),
                                   name="w1_bw")
            self.b1_bw = Parameter(np.random.uniform(
                -k, k, (4 * self.num_hiddens)).astype(np.float16),
                                   name="b1_bw")
            self.h1 = Tensor(
                np.zeros(shape=(1, self.batch_size,
                                self.num_hiddens)).astype(np.float16))
            self.c1 = Tensor(
                np.zeros(shape=(1, self.batch_size,
                                self.num_hiddens)).astype(np.float16))

        if cell == "vanilla":
            self.rnnW_fw = nn.Dense(self.num_hiddens, self.num_hiddens)
            self.rnnU_fw = nn.Dense(self.embed_size, self.num_hiddens)
            self.rnnW_bw = nn.Dense(self.num_hiddens, self.num_hiddens)
            self.rnnU_bw = nn.Dense(self.embed_size, self.num_hiddens)

        if cell == "gru":
            self.rnnWr_fw = nn.Dense(self.num_hiddens + self.embed_size,
                                     self.num_hiddens)
            self.rnnWz_fw = nn.Dense(self.num_hiddens + self.embed_size,
                                     self.num_hiddens)
            self.rnnWh_fw = nn.Dense(self.num_hiddens + self.embed_size,
                                     self.num_hiddens)
            self.rnnWr_bw = nn.Dense(self.num_hiddens + self.embed_size,
                                     self.num_hiddens)
            self.rnnWz_bw = nn.Dense(self.num_hiddens + self.embed_size,
                                     self.num_hiddens)
            self.rnnWh_bw = nn.Dense(self.num_hiddens + self.embed_size,
                                     self.num_hiddens)
            self.ones = Tensor(
                np.ones(shape=(self.batch_size,
                               self.num_hiddens)).astype(np.float16))
            self.rnnWr_fw.to_float(mstype.float16)
            self.rnnWz_fw.to_float(mstype.float16)
            self.rnnWh_fw.to_float(mstype.float16)
            self.rnnWr_bw.to_float(mstype.float16)
            self.rnnWz_bw.to_float(mstype.float16)
            self.rnnWh_bw.to_float(mstype.float16)

        self.transpose = P.Transpose()
        self.reduce_max = P.ReduceMax()
        self.expand_dims = P.ExpandDims()
        self.concat = P.Concat()

        self.reshape = P.Reshape()
        self.left_pad_tensor = Tensor(
            np.zeros(
                (1, self.batch_size, self.num_hiddens)).astype(np.float16))
        self.right_pad_tensor = Tensor(
            np.zeros(
                (1, self.batch_size, self.num_hiddens)).astype(np.float16))
        self.output_dense = nn.Dense(self.num_hiddens * 1, 2)
        self.concat0 = P.Concat(0)
        self.concat2 = P.Concat(2)
        self.concat1 = P.Concat(1)
        self.text_rep_dense = nn.Dense(2 * self.num_hiddens + self.embed_size,
                                       self.num_hiddens)
        self.mydense = nn.Dense(self.num_hiddens, 2)
        self.drop_out = nn.Dropout(keep_prob=0.7)
        self.tanh = P.Tanh()
        self.sigmoid = P.Sigmoid()
        self.slice = P.Slice()
        self.text_rep_dense.to_float(mstype.float16)
        self.mydense.to_float(mstype.float16)
        self.output_dense.to_float(mstype.float16)
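In the "gru" branch above, the three Dense layers per direction map onto the reset, update and candidate gates of a standard GRU. A hedged NumPy sketch of one forward step; the gate order and concatenation layout are assumptions, since the original construct is not shown:

    import numpy as np

    def sigmoid(x):
        return 1.0 / (1.0 + np.exp(-x))

    B, E, H = 4, 300, 512                 # batch, embed_size, num_hiddens
    x, h = np.random.randn(B, E), np.zeros((B, H))
    Wr, Wz, Wh = (np.random.randn(H + E, H) for _ in range(3))

    xh = np.concatenate([h, x], axis=1)   # cf. self.concat1
    r = sigmoid(xh @ Wr)                  # reset gate   (rnnWr_*)
    z = sigmoid(xh @ Wz)                  # update gate  (rnnWz_*)
    h_new = (1 - z) * h + z * np.tanh(np.concatenate([r * h, x], axis=1) @ Wh)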
Example #16
 def __init__(self):
     super(Net, self).__init__()
     self.ops = P.Tanh()
Example #17
        'desc_inputs': [5.0],
        'skip': ['backward']
    }),
    # input is Tensor(int32)
    ('Sigmoid1', {
        'block': (P.Sigmoid(), {
            'exception': TypeError,
            'error_keywords': ['Sigmoid']
        }),
        'desc_inputs': [Tensor(np.ones([3, 4]).astype(np.int32))],
        'skip': ['backward']
    }),

    # input is scalar
    ('Tanh0', {
        'block': (P.Tanh(), {
            'exception': TypeError,
            'error_keywords': ['Tanh']
        }),
        'desc_inputs': [5.0],
        'skip': ['backward']
    }),

    # input is scalar
    ('BatchNorm0', {
        'block': (P.BatchNorm(is_training=False), {
            'exception': TypeError,
            'error_keywords': ['BatchNorm']
        }),
        'desc_inputs': [5.0, 5.0, 5.0, 5.0, 5.0],
        'skip': ['backward']
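A hedged manual reproduction of the 'Tanh0' case above: passing a Python scalar instead of a Tensor should raise TypeError:

    try:
        P.Tanh()(5.0)                 # scalar, not a Tensor
    except TypeError as err:
        print("Tanh rejected scalar input:", err)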
Example #18
 def __init__(self):
     super(Mish, self).__init__()
     self.mul = P.Mul()
     self.tanh = P.Tanh()
     self.softplus = P.Softplus()
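The three operators compose Mish(x) = x * tanh(softplus(x)), so a hedged construct() would be `return self.mul(x, self.tanh(self.softplus(x)))`. A runnable NumPy check:

    import numpy as np

    x = np.linspace(-4.0, 4.0, 9)
    mish = x * np.tanh(np.log1p(np.exp(x)))   # softplus(x) = ln(1 + e^x)
    print(np.round(mish, 3))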