def __init__(self, in_channel, x):
    """Test cell bundling conv/bn/elementwise ops with bias parameters."""
    super().__init__()
    self.biasadd = P.BiasAdd()
    self.equal = P.Equal()
    self.addn = P.AddN()
    # 1x1 same-padded conv that preserves the channel count.
    self.conv = Conv2d(in_channels=in_channel,
                       out_channels=in_channel,
                       kernel_size=1,
                       stride=1,
                       has_bias=False,
                       weight_init='ones',
                       pad_mode='same')
    self.bn = BatchNorm2d(num_features=in_channel)
    self.assignadd = P.AssignAdd()
    self.assign = P.Assign()
    self.relu = ReLU()
    self.mean = P.ReduceMean(keep_dims=False)
    # Random 0/1 bias of length 3 — randomness is intentional test fixture.
    self.bias = Parameter(
        Tensor(np.random.randint(2, size=(3,)).astype(np.float32)),
        name="bias")
    self.bias2 = Parameter(Tensor(np.ones([3]).astype(np.float32)),
                           name="bias2")
    self.parameterupdate = ParameterUpdate(self.bias)
    self.value = Tensor(np.random.randn(3), ms.float32)
    self.x = x
def __init__(self):
    """Truncated VGG-19 backbone followed by two CPM projection convs."""
    super(Base_model, self).__init__()
    # Layer plan for the feature extractor; 'M' marks a max-pool stage.
    vgg_plan = {
        '19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512]
    }
    self.vgg_base = Vgg(vgg_plan['19'], batch_norm=False)
    self.conv4_3_CPM = Conv2d(in_channels=512, out_channels=256,
                              kernel_size=3, stride=1,
                              pad_mode='same', has_bias=True)
    self.conv4_4_CPM = Conv2d(in_channels=256, out_channels=128,
                              kernel_size=3, stride=1,
                              pad_mode='same', has_bias=True)
    self.relu = ReLU()
def __init__(self, in_dim, out_dim):
    """GCN aggregator: sparse-dense matmul, normalization, dense projection.

    Args:
        in_dim (int): input feature dimension.
        out_dim (int): output feature dimension.
    """
    super(GCNAggregator, self).__init__()
    # Bug fix: `ops.add` is the functional interface and raises when called
    # with no arguments; the instantiable primitive class is `ops.Add`
    # (matching the `ops.TensorDiv()` pattern on the next line).
    self.add = ops.Add()
    self.div = ops.TensorDiv()  # NOTE(review): newer MindSpore names this ops.Div — confirm
    self.spmm = ops.SparseDenseMatmul()
    self.fc = Dense(in_dim, out_dim)
    self.relu = ReLU()
def __init__(self, strategy_dict=None):
    """Auto-parallel test network; `strategy_dict` optionally shards each op."""
    super().__init__()
    self.shared_weight = Parameter(
        Tensor(np.full((16, 1, 32, 32), 0.5, dtype=np.float32)),
        name='shared_weight')
    self.fc1 = Dense(in_channels=1024, out_channels=116,
                     weight_init='ones', bias_init='ones', has_bias=True)
    self.relu = ReLU()
    self.sigmoid = P.Sigmoid()
    self.add1 = P.TensorAdd()
    self.add2 = P.TensorAdd()
    # mul1 is pinned to CPU to exercise heterogeneous execution.
    self.mul1 = P.Mul().add_prim_attr('primitive_target', 'CPU')
    self.mul2 = P.Mul()
    self.mul3 = P.Mul()
    self.flatten = Flatten()
    self.mul2_weight = Parameter(
        Tensor(np.full((16, 116), 1, dtype=np.float32)), name='mul2_weight')
    self.mul3_weight = Parameter(
        Tensor(np.full((16, 116), 1, dtype=np.float32)), name='mul3_weight')
    if strategy_dict is not None:
        self.add1.shard(strategy_dict['add1'])
        self.mul1.shard(strategy_dict['mul1'])
        self.fc1.matmul.shard(strategy_dict['fc1_matmul'])
        self.fc1.bias_add.shard(strategy_dict['fc1_bias_add'])
        self.mul2.shard(strategy_dict['mul2'])
        self.mul3.shard(strategy_dict['mul3'])
def __init__(self):
    """Small cell exercising ReLU / ReduceMean / AssignSub on a parameter."""
    super().__init__()
    self.relu = ReLU()
    self.mean = P.ReduceMean(keep_dims=False)
    self.assign_sub = P.AssignSub()
    # Constant-initialized 1x3x2x2 parameter mutated in place by AssignSub.
    self.input_data = Parameter(initializer(1, [1, 3, 2, 2], ms.float32),
                                name='value')
def __init__(self, input_channel, out_channel):
    """Dense -> AllGather -> ReLU; gather group is chosen per backend."""
    super(AllGatherNet, self).__init__()
    self.dense = Dense(input_channel, out_channel)
    # Pick the world communication group matching the active backend;
    # fall back to AllGather's own default when neither matches.
    if GlobalComm.BACKEND is Backend.HCCL:
        self.allgather = AllGather(group=HCCL_WORLD_COMM_GROUP)
    elif GlobalComm.BACKEND is Backend.NCCL:
        self.allgather = AllGather(group=NCCL_WORLD_COMM_GROUP)
    else:
        self.allgather = AllGather()
    self.relu = ReLU()
def __init__(self, batch_size, input_channel, out_channel):
    """AllSwap test net; swaps half-batch slices between ranks.

    Args:
        batch_size (int): global batch size; assumed even.
        input_channel (int): Dense input width.
        out_channel (int): Dense output width.
    """
    super(AllSwapNet, self).__init__()
    self.dense = Dense(input_channel, out_channel)
    self.allswap = AllSwap()
    self.relu = ReLU()
    self.reduce = ReduceSum()
    # Bug fix: `/` yields a float (e.g. 2.0), putting float entries into an
    # int64 Tensor; element counts must be integers, so use floor division.
    part_slice = batch_size // 2
    self.send_size = Tensor(
        [0, part_slice * out_channel, part_slice * out_channel],
        mindspore.int64)
    self.recv_size = Tensor(
        [part_slice * out_channel, part_slice * out_channel, 0],
        mindspore.int64)
def __init__(self, batch_size, input_channel, out_channel):
    """AllSwap + GatherV2 test net; swaps half-batch slices between ranks.

    Args:
        batch_size (int): global batch size; assumed even.
        input_channel (int): Dense input width.
        out_channel (int): Dense output width.
    """
    super(AllSwapNet, self).__init__()
    self.dense = Dense(input_channel, out_channel)
    self.allswap = AllSwap()
    self.relu = ReLU()
    # Bug fix: `/` yields a float (e.g. 2.0), putting float entries into an
    # int64 Tensor; element counts must be integers, so use floor division.
    part_slice = batch_size // 2
    self.send_size = Tensor(
        [0, part_slice * out_channel, part_slice * out_channel],
        mindspore.int64)
    self.recv_size = Tensor(
        [part_slice * out_channel, part_slice * out_channel, 0],
        mindspore.int64)
    self.gatherv2 = GatherV2()
    self.input = Tensor(np.ones([1]), mindspore.int32)
def __init__(self, in_channels, out_channels, kernel_size, vocab_size,
             embedding_size, output_channels, target, sparse):
    """Conv/BatchNorm/EmbeddingLookup test cell with a fixed RNG seed."""
    super().__init__()
    set_seed(5)  # makes the 'normal' weight inits deterministic
    self.relu = ReLU()
    self.conv = Conv2d(in_channels=in_channels,
                       out_channels=out_channels,
                       kernel_size=kernel_size,
                       has_bias=True,
                       weight_init='normal')
    self.batchnorm = BatchNorm2d(num_features=out_channels)
    self.embedding_lookup = EmbeddingLookup(vocab_size=vocab_size,
                                            embedding_size=embedding_size,
                                            param_init='normal',
                                            target=target,
                                            sparse=sparse)
    self.flatten = Flatten()
    self.cast = op.Cast()
    self.bias = Parameter(
        Tensor(np.ones([output_channels]).astype(np.float32)), name='bias')
    self.biasadd = op.BiasAdd()
    self.type = mindspore.int32
def __init__(self, in_channel, out_channel, axis, input_shape, mul_size,
             test_size, prelu_size, transpose_b, matmul_size, num_class):
    """Large mixed-op test cell (mul/conv/softmax/matmul/prelu/etc.)."""
    super().__init__()
    self.mul_weight = Parameter(
        Tensor(np.full(mul_size, 0.5, dtype=np.float32)), name="mul_weight")
    self.bias = Parameter(
        Tensor(np.full((12,), 7.1, dtype=np.float32)), name="bias")
    self.prelu_weight = Parameter(
        Tensor(np.full(prelu_size, 0.8, dtype=np.float32)),
        name="prelu_weight")
    self.matmul_weight = Parameter(
        Tensor(np.full(matmul_size, 1.1, dtype=np.float32)),
        name="matmul_weight")
    self.mul = P.Mul()
    self.conv = Conv2d(in_channels=in_channel,
                       out_channels=out_channel,
                       kernel_size=5,
                       has_bias=True,
                       weight_init='ones',
                       bias_init='ones',
                       pad_mode='valid')
    self.scalar = 0.5
    self.parameter = Parameter(
        initializer(0.5, test_size, dtype=mstype.float32), name='parameter')
    self.tensor = Tensor(np.full(test_size, 0.05, dtype=np.float32))
    self.softmax = Softmax(axis=axis)
    self.relu = ReLU()
    # Pin the wrapped ReLU primitive to CPU (heterogeneous execution path).
    self.relu.relu.add_prim_attr("primitive_target", "CPU")
    self.reshape = P.Reshape()
    self.input_shape = input_shape
    self.equal = P.Equal()
    self.cast = P.Cast()
    self.concat = P.Concat(axis=1)
    self.reduce_sum = P.ReduceSum()
    self.bias_add = P.BiasAdd()
    self.cos = P.Cos()
    self.prelu = P.PReLU()
    self.matmul = P.MatMul(transpose_b=transpose_b)
    self.l2norm = P.L2Normalize(axis=(1 - axis))
    self.tensoradd = P.TensorAdd()
    self.strided_slice = P.StridedSlice()
    self.dense = Dense(in_channels=6,
                       out_channels=num_class,
                       weight_init='ones',
                       bias_init='ones',
                       has_bias=True)
def __init__(self):
    """NHWC test cell: two identical BatchNorms and four identical convs."""
    super().__init__()
    # All layers share one configuration; keep it in one place.
    bn_cfg = dict(num_features=4, eps=1e-4, momentum=0.9, gamma_init=1,
                  beta_init=0, moving_mean_init=0, moving_var_init=1,
                  data_format="NHWC")
    conv_cfg = dict(in_channels=4, out_channels=4, kernel_size=2,
                    data_format="NHWC")
    self.bn1 = BatchNorm2d(**bn_cfg)
    self.bn2 = BatchNorm2d(**bn_cfg)
    self.add = P.Add()
    self.relu = ReLU()
    self.conv2d1 = Conv2d(**conv_cfg)
    self.conv2d2 = Conv2d(**conv_cfg)
    self.conv2d3 = Conv2d(**conv_cfg)
    self.conv2d4 = Conv2d(**conv_cfg)
def __init__(self, U):
    """Stores the constant operand U next to ReLU and Fill primitives."""
    super(Net, self).__init__()
    self.relu = ReLU()
    self.U = U
    self.fill = P.Fill()
def __init__(self):
    """Minimal cell pairing an Add with a ReLU."""
    super(TensorAddNetMe, self).__init__()
    self.relu = ReLU()
    self.add = Add()
def __init__(self, input_channel, out_channel, op):
    """Dense -> HostReduceScatter (over ranks 0 and 1) -> ReLU."""
    super(HostReduceScatterNet, self).__init__()
    self.dense = Dense(input_channel, out_channel)
    self.hostreducescatter = HostReduceScatter(op, (0, 1))
    self.relu = ReLU()
def __init__(self):
    """Wraps an AvgPool/ReLU/Flatten pipeline and snapshots its sub-cells."""
    super().__init__()
    self.seq = SequentialCell([AvgPool2d(3, 1), ReLU(), Flatten()])
    # Snapshot of the child cells; reaches into the private _cells dict on
    # purpose (this is test code inspecting SequentialCell internals).
    self.values = list(self.seq._cells.values())
def __init__(self):
    """Holds a pair of ReLU cells in a tuple attribute."""
    super().__init__()
    self.tuple = (ReLU(), ReLU())
def __init__(self):
    """ReLU/Tanh/Add cell; also runs the MetaFactory initializer."""
    super().__init__()
    MetaFactory.__init__(self)
    self.relu = ReLU()
    self.tanh = nn.Tanh()
    self.add = Add()
def __init__(self, number, loop_count=1):
    """Stores a number and an iteration count next to a ReLU cell."""
    super().__init__()
    self.number = number
    self.loop_count = loop_count
    self.relu = ReLU()
def __init__(self):
    """CPM refinement stage: two parallel 7-conv branches.

    Branch L1 ends in 38 channels (PAFs), branch L2 in 19 (heatmaps).
    """
    super(Stage_x, self).__init__()
    # Shared conv configurations: 7x7 feature convs and 1x1 head convs.
    k7 = dict(kernel_size=7, stride=1, pad_mode='same', has_bias=True)
    k1 = dict(kernel_size=1, stride=1, pad_mode='same', has_bias=True)
    self.conv1_L1 = Conv2d(in_channels=185, out_channels=128, **k7)
    self.conv2_L1 = Conv2d(in_channels=128, out_channels=128, **k7)
    self.conv3_L1 = Conv2d(in_channels=128, out_channels=128, **k7)
    self.conv4_L1 = Conv2d(in_channels=128, out_channels=128, **k7)
    self.conv5_L1 = Conv2d(in_channels=128, out_channels=128, **k7)
    self.conv6_L1 = Conv2d(in_channels=128, out_channels=128, **k1)
    self.conv7_L1 = Conv2d(in_channels=128, out_channels=38, **k1)
    self.conv1_L2 = Conv2d(in_channels=185, out_channels=128, **k7)
    self.conv2_L2 = Conv2d(in_channels=128, out_channels=128, **k7)
    self.conv3_L2 = Conv2d(in_channels=128, out_channels=128, **k7)
    self.conv4_L2 = Conv2d(in_channels=128, out_channels=128, **k7)
    self.conv5_L2 = Conv2d(in_channels=128, out_channels=128, **k7)
    self.conv6_L2 = Conv2d(in_channels=128, out_channels=128, **k1)
    self.conv7_L2 = Conv2d(in_channels=128, out_channels=19, **k1)
    self.relu = ReLU()
def __init__(self, input_channel, output_channel):
    """Dense -> HostAllGather (over ranks 0 and 1) -> ReLU."""
    super(HostAllGatherNet, self).__init__()
    self.dense = Dense(input_channel, output_channel)
    self.hostallgather = HostAllGather((0, 1))
    self.relu = ReLU()
def __init__(self, input_channel, out_channel, op):
    """Dense -> ReduceScatter(op) -> ReLU."""
    super(ReduceScatterNet, self).__init__()
    self.dense = Dense(input_channel, out_channel)
    self.reducescatter = ReduceScatter(op)
    self.relu = ReLU()
def __init__(self, U):
    """Keeps the constant operand U alongside a ReLU cell."""
    super(Net, self).__init__()
    self.relu = ReLU()
    self.U = U
def __init__(self):
    """ReLU/Tanh/Softmax cell; also runs the MetaFactory initializer."""
    super().__init__()
    MetaFactory.__init__(self)
    self.relu = ReLU()
    self.tanh = nn.Tanh()
    self.softmax = nn.Softmax()
def __init__(self):
    """Single-op cell holding a ReLU."""
    super().__init__()
    self.relu = ReLU()
def __init__(self, symbol, loop_count=(1, 3)):
    """Stores a symbol and a (start, stop) loop range with Flatten/ReLU ops."""
    super().__init__()
    self.symbol = symbol
    self.loop_count = loop_count
    self.fla = P.Flatten()
    self.relu = ReLU()
def __init__(self, input_channel, out_channel, op):
    """Dense -> AllReduce(op) -> ReLU."""
    super(AllReduceNet, self).__init__()
    self.dense = Dense(input_channel, out_channel)
    self.reduce = AllReduce(op)
    self.relu = ReLU()
def __init__(self):
    """Hand-written VGG backbone: 3x3 convs through four channel stages."""
    super(VGG_Base_MS, self).__init__()
    # Every conv shares the same 3x3 same-padded biased configuration.
    k3 = dict(kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.Layer1_1 = Conv2d(in_channels=3, out_channels=64, **k3)
    self.Layer1_2 = Conv2d(in_channels=64, out_channels=64, **k3)
    self.Layer2_1 = Conv2d(in_channels=64, out_channels=128, **k3)
    self.Layer2_2 = Conv2d(in_channels=128, out_channels=128, **k3)
    self.Layer3_1 = Conv2d(in_channels=128, out_channels=256, **k3)
    self.Layer3_2 = Conv2d(in_channels=256, out_channels=256, **k3)
    self.Layer3_3 = Conv2d(in_channels=256, out_channels=256, **k3)
    self.Layer3_4 = Conv2d(in_channels=256, out_channels=256, **k3)
    self.Layer4_1 = Conv2d(in_channels=256, out_channels=512, **k3)
    self.Layer4_2 = Conv2d(in_channels=512, out_channels=512, **k3)
    self.relu = ReLU()
    self.max_pooling_2d = nn.MaxPool2d(kernel_size=2, stride=2)
def __init__(self, input_channel, out_channel):
    """Dense -> _AlltoAll(1, 0, 1) -> ReLU."""
    super(AlltoAllNet, self).__init__()
    self.dense = Dense(input_channel, out_channel)
    self.alltoall = _AlltoAll(1, 0, 1)
    self.relu = ReLU()
def __init__(self):
    """First CPM stage: two parallel 5-conv branches.

    Branch L1 ends in 38 channels (PAFs), branch L2 in 19 (heatmaps).
    """
    super(Stage_1, self).__init__()
    # Shared conv configurations: 3x3 feature convs and 1x1 head convs.
    k3 = dict(kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    k1 = dict(kernel_size=1, stride=1, pad_mode='same', has_bias=True)
    self.conv1_CPM_L1 = Conv2d(in_channels=128, out_channels=128, **k3)
    self.conv2_CPM_L1 = Conv2d(in_channels=128, out_channels=128, **k3)
    self.conv3_CPM_L1 = Conv2d(in_channels=128, out_channels=128, **k3)
    self.conv4_CPM_L1 = Conv2d(in_channels=128, out_channels=512, **k1)
    self.conv5_CPM_L1 = Conv2d(in_channels=512, out_channels=38, **k1)
    self.conv1_CPM_L2 = Conv2d(in_channels=128, out_channels=128, **k3)
    self.conv2_CPM_L2 = Conv2d(in_channels=128, out_channels=128, **k3)
    self.conv3_CPM_L2 = Conv2d(in_channels=128, out_channels=128, **k3)
    self.conv4_CPM_L2 = Conv2d(in_channels=128, out_channels=512, **k1)
    self.conv5_CPM_L2 = Conv2d(in_channels=512, out_channels=19, **k1)
    self.relu = ReLU()