Example #1
def sample():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z1 = sym.elemwise_add(x, sym.sqrt(y))
    z2 = sym.log(x)
    gradient = graph_util.gradients([z1, z2], [x, y])
    print(gradient)
Example #2
def sample():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z1 = sym.elemwise_add(x, sym.sqrt(y))
    z2 = sym.log(x)
    gradient = graph_util.gradients([z1, z2], [x, y])
    print(gradient)
Example #3
 def minimize(self, obj):
     variables = obj.list_input_variables()
     grads = _base.gradients(obj, variables)
     updates = []
     for i, v in enumerate(variables):
         self.m.append(_base.Variable(_sym.zeros_like(v), self.name + '_m' + str(i)))
         self.v.append(_base.Variable(_sym.zeros_like(v), self.name + '_v' + str(i)))
     update_t = _sym.assign(self.t, self.t + 1)
     rate = _sym.sqrt(1 - self.beta2 ** update_t) / (1 - self.beta1 ** update_t)
     lr_t = self.learning_rate * rate
     for var, g, m, v in zip(variables, grads, self.m, self.v):
         update_m = _sym.assign(m, self.beta1 * m + (1 - self.beta1) * g)
         update_v = _sym.assign(v, self.beta2 * v + (1 - self.beta2) * g * g)
         update_var = _sym.assign(var,
             var - lr_t * update_m / (_sym.sqrt(update_v) + self.epsilon))
         updates.append(update_var)
     return _base.group(*updates)
Example #4
 def minimize(self, obj):
     variables = obj.list_input_variables()
     grads = _base.gradients(obj, variables)
     updates = []
     for i, v in enumerate(variables):
         self.m.append(
             _base.Variable(_sym.zeros_like(v), self.name + '_m' + str(i)))
         self.v.append(
             _base.Variable(_sym.zeros_like(v), self.name + '_v' + str(i)))
     update_t = _sym.assign(self.t, self.t + 1)
     rate = _sym.sqrt(1 - self.beta2**update_t) / (1 - self.beta1**update_t)
     lr_t = self.learning_rate * rate
     for var, g, m, v in zip(variables, grads, self.m, self.v):
         update_m = _sym.assign(m, self.beta1 * m + (1 - self.beta1) * g)
         update_v = _sym.assign(v,
                                self.beta2 * v + (1 - self.beta2) * g * g)
         update_var = _sym.assign(
             var,
             var - lr_t * update_m / (_sym.sqrt(update_v) + self.epsilon))
         updates.append(update_var)
     return _base.group(*updates)
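The rate expression above is Adam's bias correction, lr_t = learning_rate * sqrt(1 - beta2^t) / (1 - beta1^t), with the correction folded into the step size instead of computing bias-corrected moments explicitly. Below is a minimal NumPy sketch of the same update for reference; adam_step is a hypothetical helper written for this page, not part of the NNVM API.

import numpy as np

def adam_step(var, grad, m, v, t, lr=1e-3, beta1=0.9, beta2=0.999, eps=1e-8):
    # One Adam update mirroring the symbolic graph above; the bias
    # correction is folded into lr_t rather than applied to m and v.
    t += 1
    m = beta1 * m + (1 - beta1) * grad
    v = beta2 * v + (1 - beta2) * grad * grad
    lr_t = lr * np.sqrt(1 - beta2 ** t) / (1 - beta1 ** t)
    var = var - lr_t * m / (np.sqrt(v) + eps)
    return var, m, v, t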
Example #5
 def simple_bn(x, gamma, beta, moving_mean, moving_var,
               axis=1, epsilon=1e-5, shape=None):
     # expect = (x - moving_mean) / sym.sqrt(moving_var + eps) * gamma + beta
     scale = sym.elemwise_mul(1 / sym.sqrt(moving_var + epsilon), gamma)
     shift = sym.elemwise_add(
         sym.elemwise_mul(sym.negative(moving_mean), scale), beta)
     # for 2D
     num_newaxis = len(shape) - axis - 1
     if num_newaxis:
         scale = sym.expand_dims(scale, axis=1, num_newaxis=num_newaxis)
         shift = sym.expand_dims(shift, axis=1, num_newaxis=num_newaxis)
     return x * scale + shift
Example #6
def test_multi_loss_graph_gradients():
    # input data
    shape1 = (1000, 100)
    data1 = sym.Variable('data1', shape=(1000, 100), dtype=0)

    # fake non-sparse label
    label = sym.full(fill_value=3)

    # square loss
    sub1 = sym.elemwise_sub(data1, label, name="sub1")
    square_loss = sym.sum(data=sub1**2, axis=1, name="square_loss")

    # fake loss1
    shape2 = (1000, )
    data2 = sym.Variable('data2', shape=shape2, dtype=0)
    loss1 = sym.sqrt(data2, name="loss1")

    # fake loss2
    loss2 = sym.relu(data1, name='loss2')

    # block loss1
    total_loss = sym.elemwise_sum(sym.block_grad(loss1),
                                  square_loss,
                                  num_args=2,
                                  name="total_loss")

    # grad_g.symbol.list_output_names()
    # >> ['loss1_grad_0_output', 'grad_sum_output']
    grad_g = graph_util.get_gradient_graph([total_loss, loss2],
                                           total_loss.list_input_variables())
    # infer shape
    in_shapes, out_shapes = graph_util.infer_shape(grad_g)
    assert out_shapes == [list(shape2), list(shape1)]

    # grad_data1 is elemwise_sum of grad_loss2, grad_square_loss
    grad_data1 = grad_g.symbol[1]
    assert grad_data1.list_attr()['num_args'] == '2'

    # block grad should return zero grad
    grad_data2 = grad_g.symbol[0]
    assert 'zeros_like' in grad_g.ir()

    # test reverse infer shape for label
    assert grad_g.apply('InferShape').json_attr('shape_num_unknown_nodes') == 0

    # infer type
    in_dtypes, out_dtypes = graph_util.infer_dtype(grad_g)
    assert out_dtypes == ['float32', 'float32']

    # test reverse infer type for label
    assert grad_g.apply('InferType').json_attr('dtype_num_unknown_nodes') == 0
Example #7
def test_multi_loss_graph_gradients():
    # input data
    shape1 = (1000, 100)
    data1 = sym.Variable('data1', shape=(1000, 100), dtype=0)

    # fake non-sparse label
    label = sym.full(fill_value=3)

    # square loss
    sub1 = sym.elemwise_sub(data1, label, name="sub1")
    square_loss = sym.sum(data=sub1**2, axis=1, name="square_loss")

    # fake loss1
    shape2 = (1000, )
    data2 = sym.Variable('data2', shape=shape2, dtype=0)
    loss1 = sym.sqrt(data2, name="loss1")

    # fake loss2
    loss2 = sym.relu(data1, name='loss2')

    # block loss1
    total_loss = sym.elemwise_sum(
        sym.block_grad(loss1),
        square_loss,
        num_args=2,
        name="total_loss")

    # grad_g.symbol.list_output_names()
    # >> ['loss1_grad_0_output', 'grad_sum_output']
    grad_g = graph_util.get_gradient_graph([total_loss, loss2], total_loss.list_input_variables())
    # infer shape
    in_shapes, out_shapes = graph_util.infer_shape(grad_g)
    assert out_shapes == [list(shape2), list(shape1)]

    # grad_data1 is elemwise_sum of grad_loss2, grad_square_loss
    grad_data1 = grad_g.symbol[1]
    assert grad_data1.list_attr()['num_args'] == '2'

    # block grad should return zero grad
    grad_data2 = grad_g.symbol[0]
    assert 'zeros_like' in grad_g.ir()

    # test reverse infer shape for label
    assert grad_g.apply('InferShape').json_attr('shape_num_unknown_nodes') == 0

    # infer type
    in_dtypes, out_dtypes = graph_util.infer_dtype(grad_g)
    assert out_dtypes == ['float32', 'float32']
    
    # test reverse infer type for label
    assert grad_g.apply('InferType').json_attr('dtype_num_unknown_nodes') == 0
Example #8
    def test_fusible_network():
        """ The network is as following:
                    data
                      |
                     exp
                    /   \
                 sqrt   log
                    \   /
                    b_add
                      |
                    tanh
        """
        batch_size = 1
        data_shape = (batch_size, 3, 224, 224)
        data = symbol.Variable('data', shape=data_shape, dtype="float32")
        shape_dict = {"data": data_shape}
        params = {}
        params["data"] = np.random.uniform(-1, 1,
                                           size=data_shape).astype("float32")

        exp = symbol.exp(data, name='exp')
        sqrt = symbol.sqrt(exp, name='sqrt')
        log = symbol.log(exp, name='log')
        ret = sqrt + log
        ret = symbol.tanh(ret)

        # Fuse log and broadcast_add.
        check_annotated_graph(ret, ['exp', 'log', 'broadcast_add'], 8,
                              shape_dict,
                              params)

        # Fuse log, broadcast_add, and tanh
        check_annotated_graph(ret, ['exp', 'sqrt', 'none', 'elemwise_add'], 6,
                              shape_dict, params)

        # No operator will be fused.
        check_annotated_graph(ret, ['log', 'sqrt', 'none', 'tanh'], 11,
                              shape_dict, params)

        # All operators will be fused.
        check_annotated_graph(ret, [''], 2, shape_dict, params)

        # All operators will be fused since all of them are annotated to the
        # same device.
        check_annotated_graph(ret,
                              ['exp', 'sqrt', 'broadcast_add', 'none', 'log',
                               'tanh'], 2, shape_dict, params)

        # Fuse exp, sqrt, log, and broadcast_add.
        check_annotated_graph(ret, ['tanh'], 4, shape_dict, params)
Example #9
def test_gradient():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z1 = sym.elemwise_add(x, sym.sqrt(y))
    z2 = sym.log(x)
    gradient = graph_util.gradients([z1, z2], [x, y])
    assert len(gradient) == 2

    g1 = sym.Variable("g1")
    g2 = sym.Variable("g2")
    grad_ys = [g1, g2]
    gradient = graph_util.gradients(sym.Group([z1, z2]),
                                    sym.Group([x, y]), grad_ys=grad_ys)
    g_graph = graph.create(sym.Group(gradient)).ir()
    assert len(gradient) == 2
    assert "g1" in g_graph
    assert "g2" in g_graph
Example #10
def test_gradient():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z1 = sym.elemwise_add(x, sym.sqrt(y))
    z2 = sym.log(x)
    gradient = graph_util.gradients([z1, z2], [x, y])
    assert len(gradient) == 2

    g1 = sym.Variable("g1")
    g2 = sym.Variable("g2")
    grad_ys = [g1, g2]
    gradient = graph_util.gradients(sym.Group([z1, z2]),
                                    sym.Group([x, y]),
                                    grad_ys=grad_ys)
    g_graph = graph.create(sym.Group(gradient)).ir()
    assert len(gradient) == 2
    assert "g1" in g_graph
    assert "g2" in g_graph
Example #11
 def simple_bn(x,
               gamma,
               beta,
               moving_mean,
               moving_var,
               axis=1,
               epsilon=1e-5,
               shape=None):
     # expect = (x - moving_mean) / sym.sqrt(moving_var + eps) * gamma + beta
     scale = sym.elemwise_mul(1 / sym.sqrt(moving_var + epsilon), gamma)
     shift = sym.elemwise_add(
         sym.elemwise_mul(sym.negative(moving_mean), scale), beta)
     # for 2D
     num_newaxis = len(shape) - axis - 1
     if num_newaxis:
         scale = sym.expand_dims(scale, axis=1, num_newaxis=num_newaxis)
         shift = sym.expand_dims(shift, axis=1, num_newaxis=num_newaxis)
     return x * scale + shift
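The rewrite above folds batch normalization into a per-channel scale and shift: scale = gamma / sqrt(moving_var + epsilon) and shift = beta - moving_mean * scale, which is exactly the identity stated in the expect comment. A quick NumPy check of that identity follows; shapes and values are made up purely for illustration.

import numpy as np

rng = np.random.RandomState(0)
x = rng.randn(2, 3, 4, 4).astype("float32")
gamma, beta = rng.randn(3), rng.randn(3)
mean, var = rng.randn(3), np.abs(rng.randn(3)) + 0.1
eps = 1e-5

# Fold BN into a per-channel scale/shift, then broadcast over NCHW.
scale = (gamma / np.sqrt(var + eps)).reshape(1, 3, 1, 1)
shift = (beta - mean * gamma / np.sqrt(var + eps)).reshape(1, 3, 1, 1)
folded = x * scale + shift

expect = (x - mean.reshape(1, 3, 1, 1)) / np.sqrt(var.reshape(1, 3, 1, 1) + eps) \
    * gamma.reshape(1, 3, 1, 1) + beta.reshape(1, 3, 1, 1)
np.testing.assert_allclose(folded, expect, rtol=1e-5, atol=1e-6)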
Example #12
def test_create_full_graph():
    x = sym.Variable("x")
    y = sym.Variable("y")
    z1 = sym.elemwise_add(x, sym.sqrt(y))
    z2 = sym.log(x)
    symbol = sym.Group([z1, z2])
    compute_graph = graph.create(symbol, need_backward=True)
    assert (compute_graph.index.num_nodes == 11)

    head_grads = [sym.Variable("g1"), sym.Variable("g2")]
    compute_graph = graph.create(symbol,
                                 need_backward=True,
                                 head_grads=head_grads)
    ir = compute_graph.ir()
    assert (compute_graph.index.num_nodes == 11)
    assert ("g1" in ir)
    assert ("g2" in ir)

    fixed_args = ["x"]
    compute_graph = graph.create(symbol,
                                 need_backward=True,
                                 fixed_args=fixed_args)
    assert (compute_graph.index.num_nodes == 8)
Example #13
def test_fusible_network(device, target):
    R""" The network is as following:
                data
                  |
                 exp
                /   \
             sqrt   log
                \   /
                b_add
                  |
                tanh
    """
    if not tvm.module.enabled(device):
        print("Skip test because %s is not enabled." % device)
        return

    batch_size = 1
    data_shape = (batch_size, 3, 224, 224)
    data = symbol.Variable('data', shape=data_shape, dtype="float32")
    shape_dict = {"data": data_shape}
    params = {}
    params["data"] = np.random.uniform(-1, 1,
                                       size=data_shape).astype("float32")

    exp = symbol.exp(data, name='exp')
    sqrt = symbol.sqrt(exp, name='sqrt')
    log = symbol.log(exp, name='log')
    ret = sqrt + log
    ret = symbol.tanh(ret)

    fallback_device = tvm.context("cpu")
    target = {"cpu": "llvm", device: target}

    # Fuse log and broadcast_add.
    op_name_device = {
        "exp": "cpu",
        "log": "cpu",
        "broadcast_add": "cpu",
        "sqrt": device,
        "elemwise_add": device,
        "tanh": device
    }
    check_annotated_graph(ret, target, op_name_device, 8, fallback_device,
                          shape_dict, params)

    # Fuse log, broadcast_add, and tanh
    op_name_device = {
        "exp": "cpu",
        "log": device,
        "broadcast_add": device,
        "sqrt": "cpu",
        "elemwise_add": "cpu",
        "tanh": device
    }
    check_annotated_graph(ret, target, op_name_device, 6, fallback_device,
                          shape_dict, params)

    # No operator will be fused.
    op_name_device = {
        "exp": device,
        "log": "cpu",
        "broadcast_add": device,
        "sqrt": "cpu",
        "elemwise_add": device,
        "tanh": "cpu"
    }
    check_annotated_graph(ret, target, op_name_device, 11, fallback_device,
                          shape_dict, params)

    # All operators will be fused.
    op_name_device = {
        "exp": device,
        "log": device,
        "broadcast_add": device,
        "sqrt": device,
        "elemwise_add": device,
        "tanh": device
    }
    check_annotated_graph(ret, target, op_name_device, 2, fallback_device,
                          shape_dict, params)

    # All operators will be fused since all of them are annotated to the
    # same device.
    op_name_device = {
        "exp": "cpu",
        "log": "cpu",
        "broadcast_add": "cpu",
        "sqrt": "cpu",
        "elemwise_add": "cpu",
        "tanh": "cpu"
    }
    check_annotated_graph(ret, target, op_name_device, 2, fallback_device,
                          shape_dict, params)

    # Fuse exp, sqrt, log, and broadcast_add.
    op_name_device = {
        "exp": device,
        "log": device,
        "broadcast_add": device,
        "sqrt": device,
        "elemwise_add": device,
        "tanh": "cpu"
    }
    check_annotated_graph(ret, target, op_name_device, 4, fallback_device,
                          shape_dict, params)
Example #14
import nnvm.compiler
import nnvm.symbol as sym
import numpy as np

x = sym.Variable("x")
y = sym.Variable("y")
z = sym.elemwise_add(x, sym.sqrt(y))
compute_graph = nnvm.graph.create(z)
x_np = np.array([1, 2, 3, 4]).astype("float32")
y_np = np.array([4, 4, 4, 4]).astype("float32")
shape = (4, )
deploy_graph, lib, params = nnvm.compiler.build(
    compute_graph, target="cuda",
    target_host="llvm -target=aarch64-linux-gnu",
    shape={"x": shape}, params={"y": y_np}, dtype="float32")

x_np.tofile("./data/x.bin")
np.save("./data/x.nparray", x_np)
with open("./model/jetson_gpu.json", "w") as fo:
    fo.write(deploy_graph.json())
with open("./model/jetson_gpu.params", "wb") as fo:
    fo.write(nnvm.compiler.save_param_dict(params))

lib.save("./model/jetson_gpu.o")
dev_modules = lib.imported_modules
dev_modules[0].save("./model/jetson_gpu.ptx")
Example #15
# Most deep learning frameworks use a computation graph to describe
# their computation. In this example, we directly use
# NNVM's API to construct the computational graph.
#
# .. note::
#
#   In a typical deep learning compilation workflow,
#   we can get the models from :any:`nnvm.frontend`
#
# The following code snippet describes :math:`z = x + \sqrt{y}`
# and creates an NNVM graph from the description.
# We can print out the graph IR to check the graph content.

x = sym.Variable("x")
y = sym.Variable("y")
z = sym.elemwise_add(x, sym.sqrt(y))
compute_graph = nnvm.graph.create(z)
print("-------compute graph-------")
print(compute_graph.ir())

######################################################################
# Compile
# -------
# We can call :any:`nnvm.compiler.build` to compile the graph.
# The build function takes a shape parameter which specifies the
# input shape requirement. Here we only need to pass in the shape of ``x``;
# the other one will be inferred automatically by NNVM.
#
# The function returns three values. ``deploy_graph`` contains
# the final compiled graph structure. ``lib`` is a :any:`tvm.module.Module`
# that contains compiled CUDA functions. We do not need the ``params``
# in this example.
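# A minimal sketch of the build call described above, not part of the
# original snippet: the input shape below is an assumption chosen for
# illustration, and any enabled target such as "llvm" can replace "cuda".
import nnvm.compiler

shape = (4, )
deploy_graph, lib, params = nnvm.compiler.build(
    compute_graph, target="cuda", shape={"x": shape}, dtype="float32")
print("-------deploy graph-------")
print(deploy_graph.ir())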