Example #1
import numpy as np
import nnvm.symbol as sym
# check_function is the graph-checking utility from nnvm's test helpers.
from nnvm.testing.check_computation import check_function


def test_clip():
    x = sym.Variable("x")
    a_min = 0.2
    a_max = 0.75
    y = sym.clip(x, a_min=a_min, a_max=a_max)

    def forward(x):
        return np.clip(x, a_min=a_min, a_max=a_max)

    def backward(head_grads, x):
        # The gradient of clip is 1 inside [a_min, a_max] and 0 outside,
        # so mask the incoming gradient accordingly.
        mask1 = np.greater_equal(x, a_min).astype("float")
        mask2 = np.less_equal(x, a_max).astype("float")
        return [head_grads * mask1 * mask2]

    shape = {'x': (3, 4, 5)}
    check_function(y, forward, backward, shape=shape)
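A quick way to sanity-check the backward rule outside the NNVM test harness is to compare the mask-based gradient against a central finite difference of np.clip in plain NumPy. This is a minimal sketch; the sample shape, epsilon, and tolerance are arbitrary choices here, and entries within epsilon of a clip boundary are skipped because the finite difference is ill-defined there.

import numpy as np

a_min, a_max = 0.2, 0.75
x = np.random.uniform(-1.0, 2.0, size=(3, 4, 5))

# Analytic gradient of clip: 1 inside [a_min, a_max], 0 outside.
grad = ((x >= a_min) & (x <= a_max)).astype("float64")

# Central finite difference of np.clip, elementwise.
eps = 1e-6
fd = (np.clip(x + eps, a_min, a_max) - np.clip(x - eps, a_min, a_max)) / (2 * eps)

# Ignore entries too close to a boundary for the finite difference to resolve.
safe = (np.abs(x - a_min) > 1e-3) & (np.abs(x - a_max) > 1e-3)
assert np.allclose(grad[safe], fd[safe], atol=1e-4)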
Example #2
import numpy as np
import nnvm.symbol as sym


def test_clip():
    x = sym.Variable("x")
    a_min = 0.2
    a_max = 0.75
    y = sym.clip(x, a_min=a_min, a_max=a_max)

    def forward(x):
        return np.clip(x, a_min=a_min, a_max=a_max)

    def backward(head_grads, x):
        # Pass the gradient through only where x lies inside [a_min, a_max].
        mask1 = np.greater_equal(x, a_min).astype("float")
        mask2 = np.less_equal(x, a_max).astype("float")
        return [head_grads * mask1 * mask2]

    dtype = "float32"
    inputs = [('x', (3, 4, 5), x)]
    # `helper` is a utility defined elsewhere in this test module;
    # Example #1 above shows the equivalent check_function-based form.
    helper(y, inputs, dtype, forward, backward)
Example #3
# Most deep learning frameworks use computation graphs to describe
# their computation. In this example, we directly use
# NNVM's API to construct the computational graph.
#
# .. note::
#
#   In a typical deep learning compilation workflow,
#   we can get the models from :any:`nnvm.frontend`
#
# The following code snippet builds a small network of two dense layers,
# each followed by a clip activation, and creates an NNVM graph from the
# description. We can print out the graph IR to check the graph content.

import nnvm
import nnvm.compiler
import nnvm.graph
import nnvm.symbol as sym

x = sym.Variable("data")
y = sym.dense(x, units=16, use_bias=False)
z = sym.clip(y, a_max=100, a_min=0)
z = sym.dense(z, units=10, use_bias=False)
z = sym.clip(z, a_max=100, a_min=0)
compute_graph = nnvm.graph.create(z)
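# Print the graph IR to inspect the graph content, as mentioned above
# (Graph.ir() is the inspection helper on nnvm graphs).
print(compute_graph.ir())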

shape = (1, 28)
with nnvm.compiler.build_config(opt_level=0):
    deploy_graph, lib, params = nnvm.compiler.build(compute_graph,
                                                    target="cuda",
                                                    shape={"data": shape},
                                                    dtype="int32")

with open('/tmp/start_cuda.json', "w") as fout:
    fout.write(deploy_graph.json())
exit()
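A quick way to verify the round trip is to read the JSON back with nnvm.graph.load_json. This is a minimal sketch, reusing the path written above; printing the IR is just one way to inspect the restored graph.

import nnvm.graph

with open('/tmp/start_cuda.json') as fin:
    restored_graph = nnvm.graph.load_json(fin.read())
print(restored_graph.ir())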