Example #1
 def __init__(self):
     """Initialize the network and create the ReLUV2 primitive as an attribute."""
     super().__init__()
     # Instantiate the op once here so construct() can reuse it.
     self.relu_v2 = P.ReLUV2()
Example #2
     # NOTE(review): tail of an entry whose opening lies before this chunk.
     'desc_bprop': [[1, 3, 4, 4]]}),
 # Each entry below pairs a test name with a config dict:
 #   'block'       - operator instance under test
 #   'desc_inputs' - per-input descriptors (presumably tensor shapes — TODO
 #                   confirm with the harness that consumes this list)
 #   'desc_bprop'  - descriptors for the gradient(s) fed to backward
 #   'skip'        - test phases to skip
 ('TanhGrad', {
     'block': G.TanhGrad(),
     'desc_inputs': [[1, 3, 4, 4], [1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]],
     # Backward is skipped here — presumably because this op is itself a
     # gradient kernel; same pattern on the other *Grad entries below.
     'skip': ['backward']}),
 ('ReLU', {
     'block': P.ReLU(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
 ('ReLU6', {
     'block': P.ReLU6(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
 ('ReLUV2', {
     'block': P.ReLUV2(),
     'desc_inputs': [[1, 3, 4, 4]],
     # Two bprop descriptors: ReLUV2 apparently yields two outputs
     # (output + mask?) — TODO confirm against the op definition.
     'desc_bprop': [[1, 3, 4, 4], [1, 3, 4, 4]]}),
 ('ReLUGrad', {
     # Class is spelled ReluGrad (lowercase 'u') in G, unlike P.ReLU above.
     'block': G.ReluGrad(),
     'desc_inputs': [[1, 3, 4, 4], [1, 3, 4, 4]],
     'skip': ['backward']}),
 ('Elu', {
     'block': P.Elu(),
     'desc_inputs': [[2, 3, 4]],
     'desc_bprop': [[2, 3, 4]]}),
 ('EluGrad', {
     'block': G.EluGrad(),
     'desc_inputs': [[2, 3, 4], [2, 3, 4]],
     'desc_bprop': [[2, 3, 4]],
     'skip': ['backward']}),
Example #3
 def __init__(self, mul_weight, strategy=None):
     """Build a net with a sharded ReLUV2, a Mul op, and a weight parameter.

     Args:
         mul_weight: initial value for the ``w1`` Parameter.
         strategy: optional sharding strategy forwarded to ``ReLUV2().shard``;
             semantics of ``None`` depend on ``shard`` — not visible here.
     """
     super(Net, self).__init__()
     # Apply the parallel-sharding strategy to the ReLUV2 primitive.
     self.reluv2 = P.ReLUV2().shard(strategy)
     self.mul = P.Mul()
     # NOTE(review): chunk ends here — the method may continue past this line.
     self.weight = Parameter(mul_weight, "w1")