def __init__(self):
    super(Base_model, self).__init__()
    # VGG-19-style config: ints are conv output channels, 'M' is a max-pooling layer.
    # Alternative VGG-16-style configs, kept commented for reference:
    # cfgs_zh = {'16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512]}
    # cfgs_zh = {'16': [64, 64, 128, 128, 256, 256, 256, 512, 512, 512]}
    cfgs_zh = {'19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512]}
    self.vgg_base = Vgg(cfgs_zh['19'], batch_norm=False)
    # self.vgg_base = VGG_Base()
    # self.vgg_base = VGG_Base_MS()
    # CPM head: reduce the 512-channel VGG features to 128 channels.
    self.conv4_3_CPM = Conv2d(in_channels=512, out_channels=256, kernel_size=3,
                              stride=1, pad_mode='same', has_bias=True)
    self.conv4_4_CPM = Conv2d(in_channels=256, out_channels=128, kernel_size=3,
                              stride=1, pad_mode='same', has_bias=True)
    self.relu = ReLU()
def __init__(self, in_channel, x):
    super().__init__()
    # self._save_graphs(save_graph_flag=True, save_graph_path=".")
    self.biasadd = P.BiasAdd()
    self.equal = P.Equal()
    self.addn = P.AddN()
    self.conv = Conv2d(in_channels=in_channel, out_channels=in_channel, kernel_size=1,
                       stride=1, has_bias=False, weight_init='ones', pad_mode='same')
    self.bn = BatchNorm2d(num_features=in_channel)
    self.assignadd = P.AssignAdd()
    self.assign = P.Assign()
    self.relu = ReLU()
    self.mean = P.ReduceMean(keep_dims=False)
    self.bias = Parameter(Tensor(np.random.randint(2, size=(3,)).astype(np.float32)),
                          name="bias")
    self.bias2 = Parameter(Tensor(np.ones([3]).astype(np.float32)), name="bias2")
    self.parameterupdate = ParameterUpdate(self.bias)
    self.value = Tensor(np.random.randn(3), ms.float32)
    self.x = x
def test_conv2d_abnormal_kernel_truncated_normal():
    input_data = init.initializer(init.TruncatedNormal(), [64, 3, 7, 7],
                                  ms.float32).to_tensor()
    context.set_context(mode=context.GRAPH_MODE)
    model = ms.Model(
        Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=3,
               padding=0, weight_init="truncatednormal"))
    model.predict(input_data)
def test_conv2d_abnormal_kernel_normal():
    kernel = np.random.randn(64, 3, 7, 7).astype(np.float32)
    input_data = np.random.randn(32, 3, 224, 112).astype(np.float32)
    context.set_context(mode=context.GRAPH_MODE)
    model = ms.Model(
        Conv2d(in_channels=3, out_channels=64, kernel_size=7, stride=3,
               padding=0, weight_init=ms.Tensor(kernel)))
    model.predict(ms.Tensor(input_data))
def test_conv2d_abnormal_kernel_negative():
    # A negative kernel_size is invalid and must raise ValueError at construction.
    kernel = np.random.randn(64, 3, 7, 7).astype(np.float32)
    with pytest.raises(ValueError):
        ms.Model(
            Conv2d(in_channels=3, out_channels=64, kernel_size=-7, stride=3,
                   padding=0, weight_init=ms.Tensor(kernel)))
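# The three tests above assume a shared import header. A plausible set, inferred
# from usage (the original file's imports are not shown; treat these as assumptions):
import numpy as np
import pytest
import mindspore as ms
from mindspore import context
from mindspore.common import initializer as init
from mindspore.nn import Conv2d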
def __init__(self):
    super().__init__()
    self.bn1 = BatchNorm2d(num_features=4, eps=1e-4, momentum=0.9, gamma_init=1, beta_init=0,
                           moving_mean_init=0, moving_var_init=1, data_format="NHWC")
    self.bn2 = BatchNorm2d(num_features=4, eps=1e-4, momentum=0.9, gamma_init=1, beta_init=0,
                           moving_mean_init=0, moving_var_init=1, data_format="NHWC")
    self.add = P.Add()
    self.relu = ReLU()
    self.conv2d1 = Conv2d(in_channels=4, out_channels=4, kernel_size=2, data_format="NHWC")
    self.conv2d2 = Conv2d(in_channels=4, out_channels=4, kernel_size=2, data_format="NHWC")
    self.conv2d3 = Conv2d(in_channels=4, out_channels=4, kernel_size=2, data_format="NHWC")
    self.conv2d4 = Conv2d(in_channels=4, out_channels=4, kernel_size=2, data_format="NHWC")
def __init__(self, in_channels, out_channels, kernel_size, vocab_size, embedding_size,
             output_channels, target, sparse):
    super().__init__()
    set_seed(5)
    self.relu = ReLU()
    self.conv = Conv2d(in_channels=in_channels, out_channels=out_channels,
                       kernel_size=kernel_size, has_bias=True, weight_init='normal')
    self.batchnorm = BatchNorm2d(num_features=out_channels)
    self.embedding_lookup = EmbeddingLookup(vocab_size=vocab_size,
                                            embedding_size=embedding_size,
                                            param_init='normal', target=target,
                                            sparse=sparse)
    self.flatten = Flatten()
    self.cast = op.Cast()
    self.bias = Parameter(Tensor(np.ones([output_channels]).astype(np.float32)),
                          name='bias')
    self.biasadd = op.BiasAdd()
    self.type = mindspore.int32
def __init__(self, in_channel, out_channel):
    super().__init__()
    self.relu = PReLU(channel=in_channel, w=0.25)
    self.bn = BatchNorm2d(num_features=in_channel)
    self.conv = Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=2,
                       stride=1, has_bias=False, weight_init='ones', pad_mode='same')
    self.mean = P.ReduceMean(keep_dims=False)
    self.fc = Dense(in_channels=out_channel, out_channels=out_channel,
                    weight_init='ones', bias_init='zeros', has_bias=True)
def __init__(self, in_channel, out_channel, axis, input_shape, mul_size, test_size,
             prelu_size, transpose_b, matmul_size, num_class):
    super().__init__()
    mul_np = np.full(mul_size, 0.5, dtype=np.float32)
    self.mul_weight = Parameter(Tensor(mul_np), name="mul_weight")
    bias_np = np.full((12,), 7.1, dtype=np.float32)
    self.bias = Parameter(Tensor(bias_np), name="bias")
    prelu_np = np.full(prelu_size, 0.8, dtype=np.float32)
    self.prelu_weight = Parameter(Tensor(prelu_np), name="prelu_weight")
    matmul_np = np.full(matmul_size, 1.1, dtype=np.float32)
    self.matmul_weight = Parameter(Tensor(matmul_np), name="matmul_weight")
    self.mul = P.Mul()
    self.conv = Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=5,
                       has_bias=True, weight_init='ones', bias_init='ones',
                       pad_mode='valid')
    self.scalar = 0.5
    self.parameter = Parameter(initializer(0.5, test_size, dtype=mstype.float32),
                               name='parameter')
    self.tensor = Tensor(np.full(test_size, 0.05, dtype=np.float32))
    self.softmax = Softmax(axis=axis)
    self.relu = ReLU()
    # Pin the underlying ReLU primitive to the CPU to exercise heterogeneous placement.
    self.relu.relu.add_prim_attr("primitive_target", "CPU")
    self.reshape = P.Reshape()
    self.input_shape = input_shape
    self.equal = P.Equal()
    self.cast = P.Cast()
    self.concat = P.Concat(axis=1)
    self.reduce_sum = P.ReduceSum()
    self.bias_add = P.BiasAdd()
    self.cos = P.Cos()
    self.prelu = P.PReLU()
    self.matmul = P.MatMul(transpose_b=transpose_b)
    self.l2norm = P.L2Normalize(axis=(1 - axis))
    self.tensoradd = P.TensorAdd()
    self.strided_slice = P.StridedSlice()
    self.dense = Dense(in_channels=6, out_channels=num_class,
                       weight_init='ones', bias_init='ones', has_bias=True)
def _make_layer(self, cfg, batch_norm=False):
    """Build the VGG feature extractor from a config list ('M' means max pooling)."""
    layers = []
    in_channels = 3
    for v in cfg:
        if v == 'M':
            layers += [nn.MaxPool2d(kernel_size=2, stride=2, pad_mode='same')]
        else:
            conv2d = Conv2d(in_channels=in_channels, out_channels=v, kernel_size=3,
                            stride=1, pad_mode='same', has_bias=True)
            if batch_norm:
                layers += [conv2d, nn.BatchNorm2d(v), nn.ReLU()]
            else:
                layers += [conv2d, nn.ReLU()]
            in_channels = v
    return nn.SequentialCell(layers)
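# A minimal usage sketch for the cfg-driven builder above (hypothetical: it
# assumes Vgg routes its cfg list through _make_layer, and reuses the
# cfgs_zh['19'] list from Base_model). With 'same'-padded 3x3 convs and three
# 2x2/stride-2 poolings, a 368x368 input (a common CPM/OpenPose crop size)
# yields 512 x 46 x 46 features:
cfg19 = [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512]
net = Vgg(cfg19, batch_norm=False)
x = ms.Tensor(np.zeros((1, 3, 368, 368), np.float32))
y = net(x)  # expected shape: (1, 512, 46, 46), since 368 / 2 / 2 / 2 = 46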
def __init__(self):
    super(Stage_x, self).__init__()
    # Input has 185 channels: 128 base features plus the 38- and 19-channel
    # outputs of the previous stage (128 + 38 + 19 = 185).
    # Branch L1: five 7x7 convs, a 1x1 conv, then a 1x1 conv down to 38 channels.
    self.conv1_L1 = Conv2d(in_channels=185, out_channels=128, kernel_size=7, stride=1, pad_mode='same', has_bias=True)
    self.conv2_L1 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, pad_mode='same', has_bias=True)
    self.conv3_L1 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, pad_mode='same', has_bias=True)
    self.conv4_L1 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, pad_mode='same', has_bias=True)
    self.conv5_L1 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, pad_mode='same', has_bias=True)
    self.conv6_L1 = Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, pad_mode='same', has_bias=True)
    self.conv7_L1 = Conv2d(in_channels=128, out_channels=38, kernel_size=1, stride=1, pad_mode='same', has_bias=True)
    # Branch L2: same structure, ending in 19 channels.
    self.conv1_L2 = Conv2d(in_channels=185, out_channels=128, kernel_size=7, stride=1, pad_mode='same', has_bias=True)
    self.conv2_L2 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, pad_mode='same', has_bias=True)
    self.conv3_L2 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, pad_mode='same', has_bias=True)
    self.conv4_L2 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, pad_mode='same', has_bias=True)
    self.conv5_L2 = Conv2d(in_channels=128, out_channels=128, kernel_size=7, stride=1, pad_mode='same', has_bias=True)
    self.conv6_L2 = Conv2d(in_channels=128, out_channels=128, kernel_size=1, stride=1, pad_mode='same', has_bias=True)
    self.conv7_L2 = Conv2d(in_channels=128, out_channels=19, kernel_size=1, stride=1, pad_mode='same', has_bias=True)
    self.relu = ReLU()
def __init__(self):
    super(Stage_1, self).__init__()
    # Branch L1: three 3x3 convs, a 1x1 expansion to 512, then 1x1 down to 38 channels.
    self.conv1_CPM_L1 = Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.conv2_CPM_L1 = Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.conv3_CPM_L1 = Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.conv4_CPM_L1 = Conv2d(in_channels=128, out_channels=512, kernel_size=1, stride=1, pad_mode='same', has_bias=True)
    self.conv5_CPM_L1 = Conv2d(in_channels=512, out_channels=38, kernel_size=1, stride=1, pad_mode='same', has_bias=True)
    # Branch L2: same structure, ending in 19 channels.
    self.conv1_CPM_L2 = Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.conv2_CPM_L2 = Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.conv3_CPM_L2 = Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.conv4_CPM_L2 = Conv2d(in_channels=128, out_channels=512, kernel_size=1, stride=1, pad_mode='same', has_bias=True)
    self.conv5_CPM_L2 = Conv2d(in_channels=512, out_channels=19, kernel_size=1, stride=1, pad_mode='same', has_bias=True)
    self.relu = ReLU()
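# For orientation, a plausible construct() for Stage_1, assuming the usual
# two-branch CPM wiring (ReLU after every conv except the last in each branch).
# This is a sketch of how the layers above would typically be chained, not the
# verified forward pass from the original code.
def construct(self, x):
    h1 = self.relu(self.conv1_CPM_L1(x))
    h1 = self.relu(self.conv2_CPM_L1(h1))
    h1 = self.relu(self.conv3_CPM_L1(h1))
    h1 = self.relu(self.conv4_CPM_L1(h1))
    out_l1 = self.conv5_CPM_L1(h1)  # 38-channel branch output
    h2 = self.relu(self.conv1_CPM_L2(x))
    h2 = self.relu(self.conv2_CPM_L2(h2))
    h2 = self.relu(self.conv3_CPM_L2(h2))
    h2 = self.relu(self.conv4_CPM_L2(h2))
    out_l2 = self.conv5_CPM_L2(h2)  # 19-channel branch output
    return out_l1, out_l2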
def __init__(self):
    super(VGG_Base_MS, self).__init__()
    self.Layer1_1 = Conv2d(in_channels=3, out_channels=64, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.Layer1_2 = Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.Layer2_1 = Conv2d(in_channels=64, out_channels=128, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.Layer2_2 = Conv2d(in_channels=128, out_channels=128, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.Layer3_1 = Conv2d(in_channels=128, out_channels=256, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.Layer3_2 = Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.Layer3_3 = Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.Layer3_4 = Conv2d(in_channels=256, out_channels=256, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.Layer4_1 = Conv2d(in_channels=256, out_channels=512, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.Layer4_2 = Conv2d(in_channels=512, out_channels=512, kernel_size=3, stride=1, pad_mode='same', has_bias=True)
    self.relu = ReLU()
    self.max_pooling_2d = nn.MaxPool2d(kernel_size=2, stride=2)