Example #1
import numpy as np
from mindspore import Tensor, Parameter, nn
from mindspore.common.initializer import initializer
from mindspore.ops.operations import _grad_ops as G

class NetReluGrad(nn.Cell):
    def __init__(self):
        super(NetReluGrad, self).__init__()
        self.relu_grad = G.ReluGrad()
        # forward input: the negative diagonal entries will block the gradient
        self.x = Parameter(initializer(Tensor(np.array([[[[-1, 1, 1],
                                                          [1, -1, 1],
                                                          [1, 1, -1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='x')
        # incoming gradient, to be masked by the sign of x
        self.dy = Parameter(initializer(Tensor(np.array([[[[1, 0, 1],
                                                           [0, 1, 0],
                                                           [1, 1, 1]]]]).astype(np.float32)), [1, 1, 3, 3]), name='dy')
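The fragment ends before the forward pass. A minimal completion (the construct body and the expected output below are inferred from the stored parameters, not quoted from the source) masks dy by the sign of x:

    # continuation of NetReluGrad above (assumed, not part of the quoted source)
    def construct(self):
        # the gradient flows only where the forward input was positive
        return self.relu_grad(self.dy, self.x)

relu_grad_net = NetReluGrad()
print(relu_grad_net())  # expected [[[[0, 0, 1], [0, 0, 0], [1, 1, 0]]]]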
Example #2
 # operator test table: each entry pairs an op block with the shapes of its
 # inputs ('desc_inputs') and output gradients ('desc_bprop'); the snippet
 # begins mid-entry, so the first case's name is truncated in the source
     'desc_bprop': [[1, 3, 4, 4]],
     'skip': ['backward']}),
 ('ReLU', {
     'block': P.ReLU(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
 ('ReLU6', {
     'block': P.ReLU6(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
 ('ReLUV2', {
     'block': P.ReLUV2(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4], [1, 3, 4, 4]]}),
 ('ReLUGrad', {
     'block': G.ReluGrad(),
     'desc_inputs': [[1, 3, 4, 4], [1, 3, 4, 4]],
     'skip': ['backward']}),
 ('Elu', {
     'block': P.Elu(),
     'desc_inputs': [[2, 3, 4]],
     'desc_bprop': [[2, 3, 4]]}),
 ('EluGrad', {
     'block': G.EluGrad(),
     'desc_inputs': [[2, 3, 4], [2, 3, 4]],
     'desc_bprop': [[2, 3, 4]],
     'skip': ['backward']}),
 ('Sigmoid', {
     'block': P.Sigmoid(),
     'desc_inputs': [[1, 3, 4, 4]],
     'desc_bprop': [[1, 3, 4, 4]]}),
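Each entry above is declarative; a harness expands the shape lists into tensors and calls the block. A minimal sketch of such a runner (run_case and the random float32 inputs are illustrative assumptions, not MindSpore's actual test utilities):

import numpy as np
from mindspore import Tensor
import mindspore.ops.operations as P

def run_case(name, case):
    # expand every shape listed under 'desc_inputs' into a random float32 tensor
    inputs = [Tensor(np.random.randn(*shape).astype(np.float32))
              for shape in case['desc_inputs']]
    out = case['block'](*inputs)
    print(name, out.shape)

run_case('ReLU', {'block': P.ReLU(),
                  'desc_inputs': [[1, 3, 4, 4]],
                  'desc_bprop': [[1, 3, 4, 4]]})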
Example #3
class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.relu_grad = G.ReluGrad()

class NetReluGrad(nn.Cell):
    def __init__(self):
        super(NetReluGrad, self).__init__()
        self.relu_grad = G.ReluGrad()
Example #5
# (relies on the imports from Example #1 plus: import mindspore.ops.operations as P)
class AddReluNet(nn.Cell):
    def __init__(self):
        super(AddReluNet, self).__init__()
        self.add = P.TensorAdd()       # elementwise addition
        self.relu = P.ReLU()           # forward activation
        self.relu_grad = G.ReluGrad()  # backward mask for the activation
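The construct is again omitted; one plausible forward pass (an assumption patterned on add-then-ReLU fusion tests, not quoted from the source) chains the three primitives:

    # continuation of AddReluNet (assumed)
    def construct(self, x1, x2, dout):
        out = self.add(x1, x2)            # pointwise add...
        out = self.relu(out)              # ...followed by ReLU
        return self.relu_grad(dout, out)  # mask dout by the activation output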
Example #6
class ReluNet(nn.Cell):
    def __init__(self):
        super(ReluNet, self).__init__()
        self.relu = P.ReLU()           # forward op
        self.relu_grad = G.ReluGrad()  # its hand-written backward
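As above, a hedged completion plus a usage example (the construct signature and the random inputs are assumptions; numpy and Tensor come from the earlier imports):

    # continuation of ReluNet (assumed)
    def construct(self, x, dout):
        out = self.relu(x)
        return self.relu_grad(dout, out)

x = Tensor(np.random.randn(1, 3, 4, 4).astype(np.float32))
dout = Tensor(np.ones((1, 3, 4, 4)).astype(np.float32))
print(ReluNet()(x, dout))  # passes dout through where x > 0, zeros elsewhere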