def __init__(self):
    super(Net, self).__init__()
    self.bias_add_grad = G.BiasAddGrad()
        'desc_inputs': [[3]],
        'desc_bprop': [[3]]}),
    ('Atan2', {
        'block': P.Atan2(),
        'desc_inputs': [Tensor(np.array([0, 1]).astype(np.float32)),
                        Tensor(np.array([1, 1]).astype(np.float32))],
        'desc_bprop': [[2]]})
]

test_case_nn_ops = [
    ('BiasAdd', {
        'block': P.BiasAdd(),
        'desc_inputs': [[1, 3, 3, 3], [3]],
        'desc_bprop': [[1, 3, 3, 3]]}),
    ('BiasAddGrad', {
        'block': G.BiasAddGrad(),
        'desc_inputs': [[1, 3, 3, 3]],
        'skip': ['backward']}),
    ('Gelu', {
        'block': P.Gelu(),
        'desc_inputs': [[1, 3, 4, 4]],
        'desc_bprop': [[1, 3, 4, 4]]}),
    ('GeluGrad', {
        'block': G.GeluGrad(),
        'desc_inputs': [[2, 2], [2, 2], [2, 2]],
        'desc_bprop': [[2, 2]],
        'skip': ['backward']}),
    ('Tanh', {
        'block': P.Tanh(),
        'desc_inputs': [[1, 3, 4, 4]],
        'desc_bprop': [[1, 3, 4, 4]]}),
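# Illustrative only (not part of the original test file): a minimal sketch of how one
# 'BiasAdd' entry from the table above could be exercised by hand in PyNative mode.
# The input shapes come from 'desc_inputs'; the ones-filled data and the mode setting
# are assumptions made for the example.
import numpy as np
from mindspore import Tensor, context
from mindspore.ops import operations as P

context.set_context(mode=context.PYNATIVE_MODE)

x = Tensor(np.ones([1, 3, 3, 3]).astype(np.float32))  # matches desc_inputs[0]
b = Tensor(np.ones([3]).astype(np.float32))           # matches desc_inputs[1]
out = P.BiasAdd()(x, b)                                # forward pass of the op under test
print(out.shape)                                       # expected shape: (1, 3, 3, 3)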
def __init__(self):
    super(Net, self).__init__()
    self.cast = P.Cast()
    self.relu = P.ReLU()
    self.biasaddgrad = G.BiasAddGrad()
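# Illustrative only: the __init__ above is a fragment of a larger nn.Cell. The construct()
# below is an assumption, not the original code; it simply chains the registered primitives
# (Cast -> ReLU -> BiasAddGrad) to show how such a cell is typically completed. The class
# name NetSketch is hypothetical, chosen to avoid clashing with Net.
import mindspore.nn as nn
from mindspore import dtype as mstype
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G


class NetSketch(nn.Cell):
    def __init__(self):
        super(NetSketch, self).__init__()
        self.cast = P.Cast()
        self.relu = P.ReLU()
        self.biasaddgrad = G.BiasAddGrad()

    def construct(self, x):
        x = self.cast(x, mstype.float32)   # cast input to float32
        x = self.relu(x)                   # elementwise ReLU
        return self.biasaddgrad(x)         # reduces over all axes except the channel axis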
from mindspore.ops import Primitive
from mindspore.ops import operations as P
from mindspore.ops.operations import _grad_ops as G

Conv = P.Conv2D(out_channel=64, kernel_size=7, mode=1, pad_mode="valid", pad=0,
                stride=1, dilation=1, group=1)
Relu = P.ReLU()
Fusion = Primitive('FusionOp')
Reduce = P.ReduceOp()
Biasadd = P.BiasAdd()
Biasaddgrad = G.BiasAddGrad()
Cast = P.Cast()
MatMul = P.MatMul()
Fusion_relu_relu = Primitive('FusionOp_ReLU_ReLU')
Fusion_biasadd = Primitive('FusionOp_ReLU_ReLU_ReLU_BiasAdd_ReLU_ReLU_ReLU')
Fusion_biasaddgrad = Primitive('FusionOp_ReLU_ReLU_ReLU_BiasAddGrad_ReLU_ReLU_ReLU')
Fusion_matmul_relu = Primitive('FusionOp_MatMul_ReLU')
Add = P.Add()
Sub = P.Sub()
make_tuple = Primitive('make_tuple')


class FnDict: