def __init__(self, batch_size):
     """init function"""
     super(Reader_Albert, self).__init__()
     self.expanddims_0 = P.ExpandDims()
     self.expanddims_0_axis = 1
     self.expanddims_3 = P.ExpandDims()
     self.expanddims_3_axis = 2
     self.cast_5 = P.Cast()
     self.cast_5_to = mstype.float32
     self.sub_7 = P.Sub()
     self.sub_7_bias = 1.0
     self.mul_9 = P.Mul()
     self.mul_9_w = -10000.0
     self.gather_1_input_weight = Parameter(Tensor(np.random.uniform(0, 1, (30005, 128)).astype(np.float32)),
                                            name=None)
     self.gather_1_axis = 0
     self.gather_1 = P.Gather()
     self.gather_2_input_weight = Parameter(Tensor(np.random.uniform(0, 1, (2, 128)).astype(np.float32)), name=None)
     self.gather_2_axis = 0
     self.gather_2 = P.Gather()
     self.add_4 = P.Add()
     self.add_6 = P.Add()
     self.add_6_bias = Parameter(Tensor(np.random.uniform(0, 1, (1, 512, 128)).astype(np.float32)), name=None)
     self.layernorm1_0 = LayerNorm(mul_7_w_shape=(128,), add_8_bias_shape=(128,))
     self.linear3_0 = Linear(matmul_0_weight_shape=(128, 4096), add_1_bias_shape=(4096,))
     self.module34_0 = TransformerLayer(batch_size,
                                        layernorm1_0_mul_7_w_shape=(4096,),
                                        layernorm1_0_add_8_bias_shape=(4096,),
                                        linear3_0_matmul_0_weight_shape=(4096, 16384),
                                        linear3_0_add_1_bias_shape=(16384,),
                                        linear3_1_matmul_0_weight_shape=(16384, 4096),
                                        linear3_1_add_1_bias_shape=(4096,))
     self.layernorm1_1 = LayerNorm(mul_7_w_shape=(4096,), add_8_bias_shape=(4096,))
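The expanddims/cast/sub/mul chain above is the standard additive attention-mask trick: a 0/1 padding mask is broadcast to (batch, 1, 1, seq), inverted, and scaled by -10000 so masked positions vanish after softmax. A minimal sketch of how these attributes would compose in construct (an illustration consistent with the names above, not code from the source):

def construct(self, input_ids, attn_mask, token_type_ids):
    # (batch, seq) -> (batch, 1, 1, seq) so the mask broadcasts over heads
    mask = self.expanddims_3(self.expanddims_0(attn_mask, self.expanddims_0_axis),
                             self.expanddims_3_axis)
    mask = self.cast_5(mask, self.cast_5_to)
    # visible tokens -> 0, padding -> -10000 (added to the attention logits later)
    mask = self.mul_9(self.sub_7(self.sub_7_bias, mask), self.mul_9_w)
    # word + token-type embeddings, plus a learned position bias, then layer norm
    emb = self.add_4(self.gather_1(self.gather_1_input_weight, input_ids, self.gather_1_axis),
                     self.gather_2(self.gather_2_input_weight, token_type_ids, self.gather_2_axis))
    emb = self.layernorm1_0(self.add_6(emb, self.add_6_bias))
    # linear3_0 then widens 128 -> 4096 before the shared TransformerLayer stack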
Example #2
    def __init__(self, strategy_dict=None):
        super().__init__()
        shared_np = np.full((16, 1, 32, 32), 0.5, dtype=np.float32)
        self.shared_weight = Parameter(Tensor(shared_np), name='shared_weight')
        self.fc1 = Dense(in_channels=1024,
                         out_channels=116,
                         weight_init='ones',
                         bias_init='ones',
                         has_bias=True)
        self.relu = ReLU()
        self.sigmoid = P.Sigmoid()
        self.add1 = P.Add()
        self.add2 = P.Add()
        self.mul1 = P.Mul().add_prim_attr('primitive_target', 'CPU')
        self.mul2 = P.Mul()
        self.mul3 = P.Mul()
        self.flatten = Flatten()

        mul2_weight_np = np.full((16, 116), 1, dtype=np.float32)
        self.mul2_weight = Parameter(Tensor(mul2_weight_np),
                                     name='mul2_weight')

        mul3_weight_np = np.full((16, 116), 1, dtype=np.float32)
        self.mul3_weight = Parameter(Tensor(mul3_weight_np),
                                     name='mul3_weight')

        if strategy_dict is not None:
            self.add1.shard(strategy_dict['add1'])
            self.mul1.shard(strategy_dict['mul1'])
            self.fc1.matmul.shard(strategy_dict['fc1_matmul'])
            self.fc1.bias_add.shard(strategy_dict['fc1_bias_add'])
            self.mul2.shard(strategy_dict['mul2'])
            self.mul3.shard(strategy_dict['mul3'])
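Each entry in strategy_dict is the in_strategy tuple handed to Primitive.shard, one sub-tuple per operator input. A hypothetical dictionary for an 8-device data-parallel split along the batch axis (illustrative values only; the keys are the ones consumed above):

    strategy = {
        'add1': ((8, 1, 1, 1), (8, 1, 1, 1)),
        'mul1': ((8, 1, 1, 1), (8, 1, 1, 1)),
        'fc1_matmul': ((8, 1), (1, 1)),      # Dense uses MatMul(transpose_b=True)
        'fc1_bias_add': ((8, 1), (1,)),
        'mul2': ((8, 1), (8, 1)),
        'mul3': ((8, 1), (8, 1)),
    }
    net = Net(strategy_dict=strategy)  # 'Net' stands in for the enclosing Cell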
Example #3
 def __init__(self):
     """init function"""
     super(Rerank_Downstream, self).__init__()
     self.dense_0 = nn.Dense(in_channels=4096,
                             out_channels=8192,
                             has_bias=True)
     self.relu_1 = nn.ReLU()
     self.reducemean_2 = P.ReduceMean(keep_dims=True)
     self.sub_3 = P.Sub()
     self.sub_4 = P.Sub()
     self.pow_5 = P.Pow()
     self.pow_5_input_weight = 2.0
     self.reducemean_6 = P.ReduceMean(keep_dims=True)
     self.add_7 = P.Add()
     self.add_7_bias = 9.999999960041972e-13
     self.sqrt_8 = P.Sqrt()
     self.div_9 = P.Div()
     self.mul_10 = P.Mul()
     self.mul_10_w = Parameter(Tensor(
         np.random.uniform(0, 1, (8192, )).astype(np.float32)),
                               name=None)
     self.add_11 = P.Add()
     self.add_11_bias = Parameter(Tensor(
         np.random.uniform(0, 1, (8192, )).astype(np.float32)),
                                  name=None)
     self.dense_12 = nn.Dense(in_channels=8192,
                              out_channels=2,
                              has_bias=True)
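Operators _2 through _11 hand-roll a layer normalization over the 8192-wide activations, x_hat = (x - mean) / sqrt(var + eps) followed by a learned scale and shift, between the two Dense layers. A construct sketch consistent with these attributes (an assumption, not the source's forward pass):

def construct(self, x):
    x = self.relu_1(self.dense_0(x))
    mean = self.reducemean_2(x, -1)
    var = self.reducemean_6(self.pow_5(self.sub_4(x, mean), self.pow_5_input_weight), -1)
    normed = self.div_9(self.sub_3(x, mean), self.sqrt_8(self.add_7(var, self.add_7_bias)))
    out = self.add_11(self.mul_10(normed, self.mul_10_w), self.add_11_bias)
    return self.dense_12(out)  # 8192 -> 2 logits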
Example #4
 def __init__(self):
     super(MultiHeadAttn, self).__init__()
     self.matmul_0 = nn.MatMul()
     self.matmul_0.to_float(mstype.float16)
     self.matmul_0_w = Parameter(Tensor(
         np.random.uniform(0, 1, (768, 768)).astype(np.float32)),
                                 name=None)
     self.matmul_1 = nn.MatMul()
     self.matmul_1.to_float(mstype.float16)
     self.matmul_1_w = Parameter(Tensor(
         np.random.uniform(0, 1, (768, 768)).astype(np.float32)),
                                 name=None)
     self.matmul_2 = nn.MatMul()
     self.matmul_2.to_float(mstype.float16)
     self.matmul_2_w = Parameter(Tensor(
         np.random.uniform(0, 1, (768, 768)).astype(np.float32)),
                                 name=None)
     self.add_3 = P.Add()
     self.add_3_bias = Parameter(Tensor(
         np.random.uniform(0, 1, (768, )).astype(np.float32)),
                                 name=None)
     self.add_4 = P.Add()
     self.add_4_bias = Parameter(Tensor(
         np.random.uniform(0, 1, (768, )).astype(np.float32)),
                                 name=None)
     self.add_5 = P.Add()
     self.add_5_bias = Parameter(Tensor(
         np.random.uniform(0, 1, (768, )).astype(np.float32)),
                                 name=None)
     self.reshape_6 = P.Reshape()
     self.reshape_6_shape = tuple([BATCH_SIZE, 448, 12, 64])
     self.reshape_7 = P.Reshape()
     self.reshape_7_shape = tuple([BATCH_SIZE, 448, 12, 64])
     self.reshape_8 = P.Reshape()
     self.reshape_8_shape = tuple([BATCH_SIZE, 448, 12, 64])
     self.transpose_9 = P.Transpose()
     self.transpose_10 = P.Transpose()
     self.transpose_11 = P.Transpose()
     self.matmul_12 = nn.MatMul()
     self.matmul_12.to_float(mstype.float16)
     self.div_13 = P.Div()
     self.div_13_w = 8.0
     self.add_14 = P.Add()
     self.softmax_15 = nn.Softmax(axis=3)
     self.matmul_16 = nn.MatMul()
     self.matmul_16.to_float(mstype.float16)
     self.transpose_17 = P.Transpose()
     self.reshape_18 = P.Reshape()
     self.reshape_18_shape = tuple([BATCH_SIZE, 448, 768])
     self.matmul_19 = nn.MatMul()
     self.matmul_19.to_float(mstype.float16)
     self.matmul_19_w = Parameter(Tensor(
         np.random.uniform(0, 1, (768, 768)).astype(np.float32)),
                                  name=None)
     self.add_20 = P.Add()
     self.add_20_bias = Parameter(Tensor(
         np.random.uniform(0, 1, (768, )).astype(np.float32)),
                                  name=None)
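This is scaled dot-product attention over 12 heads of size 64 on length-448 sequences; div_13_w = 8.0 is sqrt(64). A construct sketch consistent with the attributes above (illustrative, not copied from the source):

def construct(self, x, attention_mask):
    # Q, K, V projections: (batch, 448, 768) each
    q = self.add_3(self.matmul_0(x, self.matmul_0_w), self.add_3_bias)
    k = self.add_4(self.matmul_1(x, self.matmul_1_w), self.add_4_bias)
    v = self.add_5(self.matmul_2(x, self.matmul_2_w), self.add_5_bias)
    # split heads: (batch, 12, 448, 64); K is laid out for Q @ K^T
    q = self.transpose_9(self.reshape_6(q, self.reshape_6_shape), (0, 2, 1, 3))
    k = self.transpose_10(self.reshape_7(k, self.reshape_7_shape), (0, 2, 3, 1))
    v = self.transpose_11(self.reshape_8(v, self.reshape_8_shape), (0, 2, 1, 3))
    scores = self.add_14(self.div_13(self.matmul_12(q, k), self.div_13_w), attention_mask)
    context = self.matmul_16(self.softmax_15(scores), v)
    # merge heads back to (batch, 448, 768) and apply the output projection
    context = self.reshape_18(self.transpose_17(context, (0, 2, 1, 3)), self.reshape_18_shape)
    return self.add_20(self.matmul_19(context, self.matmul_19_w), self.add_20_bias)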
Example #5
 def __init__(self):
     super().__init__()
     self.matmul = P.MatMul()
     self.tadd1 = P.Add()
     self.tadd2 = P.Add()
     self.weight = Parameter(Tensor(
         np.ones([128, 128]).astype(np.float32) * 0.01),
                             "w",
                             requires_grad=True)
Example #6
 def __init__(self, batch_size, layernorm1_0_mul_7_w_shape, layernorm1_0_add_8_bias_shape,
              linear3_0_matmul_0_weight_shape, linear3_0_add_1_bias_shape, linear3_1_matmul_0_weight_shape,
              linear3_1_add_1_bias_shape):
     """init function"""
     super(TransformerLayer, self).__init__()
     self.multiheadattn_0 = MultiHeadAttn(batch_size)
     self.add_0 = P.Add()
     self.layernorm1_0 = LayerNorm(mul_7_w_shape=layernorm1_0_mul_7_w_shape,
                                   add_8_bias_shape=layernorm1_0_add_8_bias_shape)
     self.linear3_0 = Linear(matmul_0_weight_shape=linear3_0_matmul_0_weight_shape,
                             add_1_bias_shape=linear3_0_add_1_bias_shape)
     self.newgelu2_0 = NewGeLU()
     self.linear3_1 = Linear(matmul_0_weight_shape=linear3_1_matmul_0_weight_shape,
                             add_1_bias_shape=linear3_1_add_1_bias_shape)
     self.add_1 = P.Add()
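The attributes spell out a post-norm transformer block: attention plus residual, layer norm, then a GELU feed-forward with its own residual. A sketch (assumed, not shown in the source):

def construct(self, x, attention_mask):
    attn = self.multiheadattn_0(x, attention_mask)
    x = self.layernorm1_0(self.add_0(x, attn))           # residual + layer norm
    ffn = self.linear3_1(self.newgelu2_0(self.linear3_0(x)))
    return self.add_1(x, ffn)                            # second residual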
Example #7
 def __init__(self):
     super().__init__()
     self.block1 = get_block()
     self.block2 = get_block()
     self.relu = P.ReLU()
     self.add = P.Add()
     self.bias = Tensor(np.ones([64, 64]), dtype=ms.float32)
Example #8
 def __init__(self, seq_len):
     super(ModelOneHop, self).__init__()
     self.expanddims = P.ExpandDims()
     self.expanddims_axis_0 = 1
     self.expanddims_axis_1 = 2
     self.cast = P.Cast()
     self.cast_to = mstype.float32
     self.sub = P.Sub()
     self.sub_bias = 1.0
     self.mul = P.Mul()
     self.mul_w = -10000.0
     self.input_weight_0 = Parameter(Tensor(
         np.random.uniform(0, 1, (30522, 768)).astype(np.float32)),
                                     name=None)
     self.gather_axis_0 = 0
     self.gather = P.Gather()
     self.input_weight_1 = Parameter(Tensor(
         np.random.uniform(0, 1, (2, 768)).astype(np.float32)),
                                     name=None)
     self.add = P.Add()
     self.add_bias = Parameter(Tensor(
         np.random.uniform(0, 1, (1, seq_len, 768)).astype(np.float32)),
                               name=None)
     self.layernorm = LayerNorm()
     self.encoder_layer_1_4 = BertEncoder(seq_len)
     self.encoder_layer_5_8 = BertEncoder(seq_len)
     self.encoder_layer_9_12 = BertEncoder(seq_len)
     self.cls_ids = Tensor(np.array(0))
     self.gather_axis_1 = 1
     self.dense = nn.Dense(in_channels=768, out_channels=768, has_bias=True)
     self.tanh = nn.Tanh()
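The last four attributes form the usual BERT pooler: take the [CLS] position (index 0 along axis 1) and apply dense + tanh. A fragment of that tail end (an assumption from the names, not the source):

    # given encoder_out of shape (batch, seq_len, 768) from encoder_layer_9_12:
    cls = self.gather(encoder_out, self.cls_ids, self.gather_axis_1)  # (batch, 768)
    pooled = self.tanh(self.dense(cls))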
Example #9
 def __init__(self):
     super(NetWrapper, self).__init__()
     self.unq = P.Unique()
     self.add = P.Add()
     self.expand_dims = P.ExpandDims()
     self.cast = P.Cast()
     self.net = SparseApplyFtrlNet()
Example #10
    def __init__(self, inp, oup, stride, expand_ratio, last_relu=False):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        layers.extend([
            # dw
            ConvBNReLU(hidden_dim,
                       hidden_dim,
                       stride=stride,
                       groups=hidden_dim),
            # pw-linear
            nn.Conv2d(hidden_dim, oup, kernel_size=1, stride=1,
                      has_bias=False),
            _bn(oup),
        ])
        self.conv = nn.SequentialCell(layers)
        self.add = P.Add()
        self.cast = P.Cast()
        self.last_relu = last_relu
        self.relu = nn.ReLU6()
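MobileNetV2-style usage: the residual add fires only when stride is 1 and the widths match, and last_relu optionally re-activates the sum. A construct sketch consistent with these flags (not taken from the source):

    def construct(self, x):
        out = self.conv(x)
        if self.use_res_connect:
            out = self.add(x, out)
        if self.last_relu:
            out = self.relu(out)
        return out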
Example #11
    def __init__(self,
                 in_channels,
                 out_channels,
                 weight_init='normal',
                 bias_init='zeros',
                 damping=0.03,
                 loss_scale=1,
                 frequency=278,
                 batch_size=32,
                 has_bias=True,
                 activation=None):
        super(Dense_Thor_GPU, self).__init__()
        self.in_channels = Validator.check_positive_int(in_channels)
        self.out_channels = Validator.check_positive_int(out_channels)
        self.has_bias = Validator.check_bool(has_bias)
        self.thor = True
        if isinstance(weight_init, Tensor):
            if weight_init.ndim != 2 or weight_init.shape[0] != out_channels or \
                    weight_init.shape[1] != in_channels:
                raise ValueError("weight_init shape error")

        self.weight = Parameter(initializer(weight_init, [out_channels, in_channels]))

        if self.has_bias:
            if isinstance(bias_init, Tensor):
                if bias_init.ndim != 1 or bias_init.shape[0] != out_channels:
                    raise ValueError("bias_init shape error")

            self.bias = Parameter(initializer(bias_init, [out_channels]))

        self.matmul = P.MatMul(transpose_b=True)
        self.bias_add = P.BiasAdd()

        self.activation = get_activation(activation)
        self.activation_flag = self.activation is not None
        split_dim = 128
        matrix_A_shape, matrix_G_shape = caculate_matmul_shape(self.in_channels, self.out_channels, split_dim)
        self.matrix_A_inv = Parameter(Tensor(np.zeros(matrix_A_shape).astype(np.float32)), requires_grad=False)
        self.matrix_G_inv = Parameter(Tensor(np.zeros(matrix_G_shape).astype(np.float32)), requires_grad=False)
        self.broadcast_to = P.BroadcastTo(matrix_A_shape)
        self.cov_step = Parameter(initializer(0, [1], mstype.int32), requires_grad=False)
        self.shape = P.Shape()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.mul = P.Mul()
        self.cube_matmul = P.MatMul(transpose_a=True)
        self.loss_scale = Tensor(1 / loss_scale, mstype.float16)
        self.batch_size = Tensor(batch_size, mstype.float16)
        self.getG = P.InsertGradientOf(self.save_gradient)
        self.damping = Parameter(Tensor(damping), requires_grad=False)
        self.dampingA = Tensor(np.identity(in_channels), mstype.float32)
        self.dampingG = Tensor(np.identity(out_channels), mstype.float32)
        self.cast = P.Cast()
        self.gather = P.Gather()
        self.freq = Tensor(frequency, mstype.int32)
        self.axis = 0
        self.add = P.Add()
        self.sqrt = P.Sqrt()
        self.cholesky = P.CholeskyTrsm(split_dim=split_dim)
        self.vector_matmul = P.BatchMatMul(transpose_a=True)
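A hypothetical instantiation, just to show the knobs (values illustrative, not from the source):

        fc = Dense_Thor_GPU(in_channels=2048, out_channels=1001,
                            damping=0.03, loss_scale=128, frequency=278,
                            batch_size=32, has_bias=True, activation='relu')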
Example #12
    def __init__(self,
                 decay_policy='Linear',
                 learning_rate=0.001,
                 target_unclipped_quantile=0.9,
                 fraction_stddev=0.01,
                 seed=0):
        super(AdaClippingWithGaussianRandom, self).__init__()
        if decay_policy not in ['Linear', 'Geometric']:
            msg = "decay policy of adaptive clip must be in ['Linear', 'Geometric'], \
                but got: {}".format(decay_policy)
            LOGGER.error(TAG, msg)
            raise ValueError(msg)
        self._decay_policy = decay_policy
        learning_rate = check_param_type('learning_rate', learning_rate, float)
        learning_rate = check_value_positive('learning_rate', learning_rate)
        self._learning_rate = Tensor(learning_rate, mstype.float32)
        fraction_stddev = check_param_type('fraction_stddev', fraction_stddev,
                                           float)
        self._fraction_stddev = Tensor(fraction_stddev, mstype.float32)
        target_unclipped_quantile = check_param_type(
            'target_unclipped_quantile', target_unclipped_quantile, float)
        self._target_unclipped_quantile = Tensor(target_unclipped_quantile,
                                                 mstype.float32)

        self._zero = Tensor(0, mstype.float32)
        self._add = P.Add()
        self._sub = P.Sub()
        self._mul = P.Mul()
        self._exp = P.Exp()
        seed = check_param_type('seed', seed, int)
        self._seed = check_value_non_negative('seed', seed)
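The attribute names match adaptive norm-bound clipping: a noised estimate of the unclipped fraction is compared with the target quantile, and the bound decays linearly or geometrically. A NumPy stand-in for the update (a hedged sketch, not the Cell's construct):

        import numpy as np

        def adapt_norm_bound(beta, empirical_fraction, lr=0.001, target=0.9,
                             stddev=0.01, policy='Linear'):
            # noised fraction of samples whose gradient norm fell under beta
            err = empirical_fraction + np.random.normal(0.0, stddev) - target
            return beta - lr * err if policy == 'Linear' else beta * np.exp(-lr * err)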
Example #13
 def __init__(self):
     super().__init__()
     self.add = P.Add()
     self.sub = P.Sub()
     self.mul = P.Mul()
     self.div = P.RealDiv()
     self.net = Branch2Net()
Example #14
    def __init__(self, nptype):
        super(AddNet, self).__init__()

        self.add = P.Add()

        np.random.seed(0)
        self.x = Parameter(initializer(
            Tensor(np.random.randn(2, 0).astype(nptype)), [2, 0]), name='x')
        self.y = Parameter(initializer(
            Tensor(np.random.randn(2, 1).astype(nptype)), [2, 1]), name='y')

        self.x1 = Parameter(initializer(
            Tensor(np.arange(3).reshape(3).astype(nptype)), [3]), name='x1')
        self.y1 = Parameter(initializer(
            Tensor(np.array([2]).astype(nptype)), [1]), name='y1')

        self.x2 = Parameter(initializer(
            Tensor(np.arange(3 * 3 * 3 * 3).reshape(3, 3, 3, 3).astype(nptype)), [3, 3, 3, 3]), name='x2')
        self.y2 = Parameter(initializer(
            Tensor(np.arange(3 * 3 * 3 * 3).reshape(3, 3, 3, 3).astype(nptype)), [3, 3, 3, 3]), name='y2')

        self.x3 = Parameter(initializer(
            Tensor(np.arange(1 * 1 * 3 * 3).reshape(1, 1, 3, 3).astype(nptype)), [1, 1, 3, 3]), name='x3')
        self.y3 = Parameter(initializer(
            Tensor(np.arange(3 * 3 * 3 * 3).reshape(3, 3, 3, 3).astype(nptype)), [3, 3, 3, 3]), name='y3')
Example #15
    def __init__(self, in_channels, out_channels, stride=1, momentum=0.1):
        super(ResidualBlock, self).__init__()

        out_chls = out_channels // self.expansion
        self.conv1 = _conv(in_channels, out_chls, kernel_size=1, stride=1)
        self.bn1 = _bn(out_chls, momentum=momentum)

        self.conv2 = _conv(out_chls,
                           out_chls,
                           kernel_size=3,
                           stride=stride,
                           padding=1,
                           pad_mode='pad')
        self.bn2 = _bn(out_chls, momentum=momentum)

        self.conv3 = _conv(out_chls, out_channels, kernel_size=1, stride=1)
        self.bn3 = _bn(out_channels, momentum=momentum)

        self.relu = P.ReLU()
        self.downsample = (in_channels != out_channels)
        self.stride = stride
        if self.downsample or self.stride != 1:
            self.conv_down_sample = _conv(in_channels,
                                          out_channels,
                                          kernel_size=1,
                                          stride=stride)
            self.bn_down_sample = _bn(out_channels, momentum=momentum)

        self.add = P.Add()
Example #16
 def __init__(self):
     """init function"""
     super(NewGeLU, self).__init__()
     self.mul_0 = P.Mul()
     self.mul_0_w = 0.5
     self.pow_1 = P.Pow()
     self.pow_1_input_weight = 3.0
     self.mul_2 = P.Mul()
     self.mul_2_w = 0.044714998453855515
     self.add_3 = P.Add()
     self.mul_4 = P.Mul()
     self.mul_4_w = 0.7978845834732056
     self.tanh_5 = nn.Tanh()
     self.add_6 = P.Add()
     self.add_6_bias = 1.0
     self.mul_7 = P.Mul()
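The constants are the tanh approximation of GELU, 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))), with mul_4_w = 0.79788... = sqrt(2/pi). A construct sketch consistent with these attributes (not from the source):

def construct(self, x):
    inner = self.add_3(x, self.mul_2(self.pow_1(x, self.pow_1_input_weight), self.mul_2_w))
    gate = self.add_6(self.tanh_5(self.mul_4(inner, self.mul_4_w)), self.add_6_bias)
    return self.mul_7(self.mul_0(x, self.mul_0_w), gate)  # 0.5 * x * (1 + tanh(...))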
Example #17
    def __init__(self, inp, oup, stride, expand_ratio):
        super(InvertedResidual, self).__init__()
        assert stride in [1, 2]

        hidden_dim = int(round(inp * expand_ratio))
        self.use_res_connect = stride == 1 and inp == oup

        layers = []
        if expand_ratio != 1:
            layers.append(ConvBNReLU(inp, hidden_dim, kernel_size=1))
        layers.extend([
            ConvBNReLU(hidden_dim,
                       hidden_dim,
                       stride=stride,
                       groups=hidden_dim),
            nn.Conv2dBnAct(hidden_dim,
                           oup,
                           kernel_size=1,
                           stride=1,
                           pad_mode='pad',
                           padding=0,
                           group=1,
                           has_bn=True)
        ])
        self.conv = nn.SequentialCell(layers)
        self.add = P.Add()
Example #18
    def __init__(self, batch_size, query_linear_bias, key_linear_bias,
                 value_linear_bias):
        """init function"""
        super(MultiHeadAttn, self).__init__()
        self.batch_size = batch_size
        self.matmul = nn.MatMul()
        self.add = P.Add()
        self.reshape = P.Reshape()
        self.transpose = P.Transpose()
        self.div = P.Div()
        self.softmax = nn.Softmax(axis=3)

        self.query_linear_weight = Parameter(Tensor(
            np.random.uniform(0, 1, (4096, 4096)).astype(np.float32)),
                                             name=None)
        self.query_linear_bias = query_linear_bias

        self.key_linear_weight = Parameter(Tensor(
            np.random.uniform(0, 1, (4096, 4096)).astype(np.float32)),
                                           name=None)
        self.key_linear_bias = key_linear_bias

        self.value_linear_weight = Parameter(Tensor(
            np.random.uniform(0, 1, (4096, 4096)).astype(np.float32)),
                                             name=None)
        self.value_linear_bias = value_linear_bias

        self.reshape_shape = tuple([batch_size, 512, 64, 64])

        self.w = Parameter(Tensor(
            np.random.uniform(0, 1, (64, 64, 4096)).astype(np.float32)),
                           name=None)
        self.b = Parameter(Tensor(
            np.random.uniform(0, 1, (4096, )).astype(np.float32)),
                           name=None)
Example #19
 def __init__(self, passthrough_w_0, passthrough_w_1):
     """init function"""
     super(LayerNorm, self).__init__()
     self.reducemean_0 = P.ReduceMean(keep_dims=True)
     self.sub_1 = P.Sub()
     self.pow_2 = P.Pow()
     self.pow_2_input_weight = 2.0
     self.reducemean_3 = P.ReduceMean(keep_dims=True)
     self.add_4 = P.Add()
     self.add_4_bias = 9.999999960041972e-13
     self.sqrt_5 = P.Sqrt()
     self.div_6 = P.Div()
     self.mul_7 = P.Mul()
     self.mul_7_w = passthrough_w_0
     self.add_8 = P.Add()
     self.add_8_bias = passthrough_w_1
Example #20
    def __init__(self, num_classes=10):
        super(SqueezeNet_Residual, self).__init__()

        self.conv1 = nn.Conv2d(3,
                               96,
                               kernel_size=7,
                               stride=2,
                               pad_mode='valid',
                               has_bias=True)
        self.fire2 = Fire(96, 16, 64, 64)
        self.fire3 = Fire(128, 16, 64, 64)
        self.fire4 = Fire(128, 32, 128, 128)
        self.fire5 = Fire(256, 32, 128, 128)
        self.fire6 = Fire(256, 48, 192, 192)
        self.fire7 = Fire(384, 48, 192, 192)
        self.fire8 = Fire(384, 64, 256, 256)
        self.fire9 = Fire(512, 64, 256, 256)
        # Final convolution is initialized differently from the rest
        self.conv10 = nn.Conv2d(512, num_classes, kernel_size=1, has_bias=True)

        self.relu = nn.ReLU()
        self.max_pool2d = nn.MaxPool2d(kernel_size=3, stride=2)
        self.add = P.Add()
        self.dropout = nn.Dropout(keep_prob=0.5)
        self.mean = P.ReduceMean(keep_dims=True)
        self.flatten = nn.Flatten()
        self.custom_init_weight()
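This is SqueezeNet with simple bypass: self.add provides identity shortcuts around the fire modules whose input and output widths match (fire3/5/7/9). An illustrative opening of the forward pass (an assumption from the layer widths, not the source's construct):

        x = self.max_pool2d(self.relu(self.conv1(x)))
        x = self.fire2(x)
        x = self.add(x, self.fire3(x))   # 128-channel bypass
        x = self.max_pool2d(self.fire4(x))
        x = self.add(x, self.fire5(x))   # 256-channel bypass
        # fire6..fire9 (bypasses at fire7 and fire9), conv10, dropout,
        # mean and flatten complete the network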
Example #21
    def __init__(self, num_in, num_mid, num_out, kernel_size, stride=1, act_type='relu', use_se=False,
                 use_res_connect=True, last_relu=False):
        super(GhostBottleneck, self).__init__()
        self.ghost1 = GhostModule(num_in, num_mid, kernel_size=1,
                                  stride=1, padding=0, act_type=act_type)

        self.use_res_connect = use_res_connect
        self.last_relu = last_relu
        self.use_dw = stride > 1
        self.dw = None
        if self.use_dw:
            self.dw = ConvBNReLU(num_mid, num_mid, kernel_size=kernel_size, stride=stride,
                                 act_type=act_type, groups=num_mid, use_act=False)

        self.use_se = use_se
        if use_se:
            self.se = SE(num_mid)

        self.ghost2 = GhostModule(num_mid, num_out, kernel_size=1, stride=1,
                                  padding=0, act_type=act_type, use_act=False)
        self.relu = nn.ReLU()
        if self.use_res_connect:
            self.down_sample = False
            if num_in != num_out or stride != 1:
                self.down_sample = True
            self.shortcut = None
            if self.down_sample:
                self.shortcut = nn.SequentialCell([
                    ConvBNReLU(num_in, num_in, kernel_size=kernel_size, stride=stride,
                               groups=num_in, use_act=False),
                    ConvBNReLU(num_in, num_out, kernel_size=1, stride=1,
                               groups=1, use_act=False),
                ])
            self.add = P.Add()
Example #22
    def __init__(self, batch_size=4):
        super(DiceLoss, self).__init__()

        self.threshold0 = Tensor(0.5, mstype.float32)
        self.zero_float32 = Tensor(0.0, mstype.float32)
        self.k = int(640 * 640)
        self.negative_one_int32 = Tensor(-1, mstype.int32)
        self.batch_size = batch_size
        self.concat = P.Concat()
        self.less_equal = P.LessEqual()
        self.greater = P.Greater()
        self.reduce_sum = P.ReduceSum()
        self.reduce_sum_keep_dims = P.ReduceSum(keep_dims=True)
        self.reduce_mean = P.ReduceMean()
        self.reduce_min = P.ReduceMin()
        self.cast = P.Cast()
        self.minimum = P.Minimum()
        self.expand_dims = P.ExpandDims()
        self.select = P.Select()
        self.fill = P.Fill()
        self.topk = P.TopK(sorted=True)
        self.shape = P.Shape()
        self.sigmoid = P.Sigmoid()
        self.reshape = P.Reshape()
        self.slice = P.Slice()
        self.logical_and = P.LogicalAnd()
        self.logical_or = P.LogicalOr()
        self.equal = P.Equal()
        self.zeros_like = P.ZerosLike()
        self.add = P.Add()
        self.gather = P.Gather()
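The operator set (sigmoid, topk with k = 640*640, comparisons, masked reductions) is characteristic of a dice loss with online hard example mining over 640x640 score maps. The core coefficient, as a hedged sketch (illustrative, not the source's construct):

        def dice_coefficient(self, pred, gt, mask):
            # inputs flattened to (batch, H*W); mask keeps the OHEM-selected pixels
            pred = self.sigmoid(pred) * mask
            gt = gt * mask
            inter = self.reduce_sum(pred * gt, 1)
            union = self.reduce_sum(pred * pred, 1) + self.reduce_sum(gt * gt, 1) + 1e-6
            return 2.0 * inter / union  # the loss is 1 - dice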
Example #23
 def __init__(self, summary_type, tag, data):
     super(SummaryNet, self).__init__()
     self.tag = tag
     self.data = data
     self.summary_fn = getattr(P, summary_type)()
     self.one = Tensor(np.array([1]).astype(np.float32))
     self.add = P.Add()
Example #24
 def __init__(self):
     super(Assign_WAR, self).__init__()
     self.assign = P.Assign()
     self.sub = P.Sub()
     self.add = P.Add()
     self.para = Parameter(Tensor(1, dtype=ms.int32), name='para')
     self.weight = Parameter(Tensor(5, dtype=ms.int32), name='weight')
Example #25
 def __init__(self):
     super(LogSigmoid, self).__init__()
     self.mul = P.Mul()
     self.exp = P.Exp()
     self.add = P.Add()
     self.rec = P.Reciprocal()
     self.log = P.Log()
Example #26
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 down_sample=False):
        super(ResidualBlock, self).__init__()

        out_chls = out_channels // self.expansion
        self.conv1 = conv1x1(in_channels, out_chls, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(out_chls)

        self.conv2 = conv3x3(out_chls, out_chls, stride=stride, padding=1)
        self.bn2 = nn.BatchNorm2d(out_chls)

        self.conv3 = conv1x1(out_chls, out_channels, stride=1, padding=0)
        self.bn3 = nn.BatchNorm2d(out_channels)

        self.relu = nn.ReLU()
        self.downsample = down_sample

        if self.downsample:
            self.conv_down_sample = conv1x1(in_channels, out_channels,
                                            stride=stride, padding=0)
            self.bn_down_sample = nn.BatchNorm2d(out_channels)
        self.add = P.Add()
Example #27
    def __init__(self,
                 in_channel,
                 out_channel,
                 stride=1):
        super(ResidualBlock, self).__init__()

        channel = out_channel // self.expansion
        self.conv1 = _conv1x1(in_channel, channel, stride=1)
        self.bn1 = _bn(channel)

        self.conv2 = _conv3x3(channel, channel, stride=stride)
        self.bn2 = _bn(channel)

        self.conv3 = _conv1x1(channel, out_channel, stride=1)
        self.bn3 = _bn_last(out_channel)

        self.relu = nn.ReLU()

        self.down_sample = False

        if stride != 1 or in_channel != out_channel:
            self.down_sample = True
        self.down_sample_layer = None

        if self.down_sample:
            self.down_sample_layer = nn.SequentialCell([_conv1x1(in_channel, out_channel, stride),
                                                        _bn(out_channel)])
        self.add = P.Add()
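The ResidualBlock variants in this collection share the same bottleneck forward pass, sketched once here against this version's attributes (consistent with them, not copied from the source):

    def construct(self, x):
        identity = x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.down_sample:
            identity = self.down_sample_layer(identity)
        return self.relu(self.add(out, identity))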
Example #28
 def __init__(self, mul_7_w_shape, add_8_bias_shape):
     """init function"""
     super(LayerNorm, self).__init__()
     self.reducemean_0 = P.ReduceMean(keep_dims=True)
     self.sub_1 = P.Sub()
     self.pow_2 = P.Pow()
     self.pow_2_input_weight = 2.0
     self.reducemean_3 = P.ReduceMean(keep_dims=True)
     self.add_4 = P.Add()
     self.add_4_bias = 9.999999960041972e-13
     self.sqrt_5 = P.Sqrt()
     self.div_6 = P.Div()
     self.mul_7 = P.Mul()
     self.mul_7_w = Parameter(Tensor(np.random.uniform(0, 1, mul_7_w_shape).astype(np.float32)), name=None)
     self.add_8 = P.Add()
     self.add_8_bias = Parameter(Tensor(np.random.uniform(0, 1, add_8_bias_shape).astype(np.float32)), name=None)
Example #29
    def __init__(self,
                 in_channels,
                 out_channels,
                 stride=1,
                 momentum=0.9):
        super(ResidualBlock, self).__init__()

        out_chls = out_channels // self.expansion
        self.conv1 = _conv1x1(in_channels, out_chls, stride=1)
        self.bn1 = _fused_bn(out_chls, momentum=momentum)

        self.conv2 = _conv3x3(out_chls, out_chls, stride=stride)
        self.bn2 = _fused_bn(out_chls, momentum=momentum)

        self.conv3 = _conv1x1(out_chls, out_channels, stride=1)
        self.bn3 = _fused_bn(out_channels, momentum=momentum)

        self.relu = P.ReLU()
        self.downsample = (in_channels != out_channels)
        self.stride = stride
        if self.downsample:
            self.conv_down_sample = _conv1x1(in_channels, out_channels,
                                             stride=stride)
            self.bn_down_sample = _fused_bn(out_channels, momentum=momentum)
        elif self.stride != 1:
            self.maxpool_down = nn.MaxPool2d(kernel_size=1, stride=2, pad_mode='same')

        self.add = P.Add()
Example #30
 def construct(self, a, b, x):
     if a < b:
         a = P.Add()(a, b)
     else:
         a = P.Sub()(a, b)
     if a == x:
         a = P.Mul()(a, b)
     else:
         a = P.RealDiv()(a, b)
     if b == x:
         b = P.Add()(a, b)
     else:
         b = P.Add()(a, x)
     a = a * b
     out = a + b + x
     return out