Example #1
File: LeNet.py Project: sj1104/Het
def conv_pool(x, in_channel, out_channel, name):
    weight = init.random_normal(shape=(out_channel, in_channel, 5, 5),
                                stddev=0.1,
                                name=name + '_weight')
    x = ad.conv2d_op(x, weight, padding=2, stride=1)
    x = ad.relu_op(x)
    x = ad.max_pool2d_op(x, kernel_H=2, kernel_W=2, padding=0, stride=2)
    return x
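A minimal usage sketch (not from the original file; channel counts are illustrative): stacking two of these blocks on a 28x28 single-channel input gives the LeNet-style stem of Example #4, since a 5x5 kernel with padding=2 and stride=1 preserves H and W while each 2x2 pool halves them.

X = ad.Variable(name="X")                                      # expected NCHW input, e.g. (N, 1, 28, 28)
h = conv_pool(X, in_channel=1, out_channel=32, name='conv1')   # -> (N, 32, 14, 14)
h = conv_pool(h, in_channel=32, out_channel=64, name='conv2')  # -> (N, 64, 7, 7)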
Example #2
def test_Conv2d():
    # Build a one-layer convolution graph and run it on a random batch of
    # 28x28 single-channel images (NCHW layout).
    X = ad.Variable(name="X")
    W1 = init.random_normal((32, 1, 5, 5), stddev=0.1, name='W1')
    y = ad.conv2d_op(X, W1, padding=2, stride=1)
    executor = ad.Executor([y], ctx=ctx)
    X_val = rand.normal(scale=0.1,
                        size=(batch_size, 1, 28, 28)).astype(np.float32)
    res = executor.run(feed_dict={X: X_val})
    Check(executor, res, [X], [y], [X_val])
    print(sys._getframe().f_code.co_name, 'pass!')
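The test relies on module-level globals (`ctx`, `rand`, `batch_size`, `np`, `sys`, and the `Check` comparison helper) defined elsewhere in the test file. A hedged sketch of the NumPy-side setup it assumes, with illustrative values:

import sys
import numpy as np

batch_size = 32                          # any small batch size works for this test
rand = np.random.RandomState(seed=123)   # seeded RNG, mirroring Example #4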
Example #3
    def version_1(cls, node, tensor_dict, **kwargs):
        # Map a Conv graph node onto ad.conv2d_op: fetch its input and weight
        # tensors, then read the 'pads' and 'strides' attributes.
        x = tensor_dict[node.input_tensor_names[0]]
        in_weights = tensor_dict[node.input_tensor_names[1]]
        in_weights_shape = list(in_weights.shape)
        paddings = node.get_attr_value('pads')
        strides = node.get_attr_value('strides')
        # ad.conv2d_op takes scalar padding/stride, so only uniform values are accepted.
        assert len(set(paddings)) == 1 and len(set(strides)) == 1

        y = ad.conv2d_op(x, in_weights, padding=paddings[0], stride=strides[0])
        tensor_dict[node.output_tensor_names[0]] = y
        return y
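The assertion only admits nodes whose padding is identical on all sides and whose stride is the same along H and W, since `ad.conv2d_op` takes a single scalar for each. Illustrative (hypothetical) attribute values that pass the check:

paddings = [2, 2, 2, 2]   # uniform padding on every side
strides = [1, 1]          # same stride along H and W
assert len(set(paddings)) == 1 and len(set(strides)) == 1   # holds, so paddings[0] and strides[0] are used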
Example #4
def cnn(executor_ctx=None, num_epochs=10, print_loss_val_each_epoch=False):

    print("Build CNN model...")

    W1 = init.random_normal((32, 1, 5, 5), stddev=0.1, name='W1')
    W2 = init.random_normal((64, 32, 5, 5), stddev=0.1, name='W2')
    W3 = init.random_normal((7 * 7 * 64, 10), stddev=0.1, name='W3')
    b3 = init.random_normal((10, ), stddev=0.1, name='b3')

    X = ad.Variable(name="X")

    z1 = ad.conv2d_op(X, W1, padding=2, stride=1)
    z2 = ad.relu_op(z1)
    z3 = ad.avg_pool2d_op(z2, kernel_H=2, kernel_W=2, padding=0, stride=2)

    z4 = ad.conv2d_op(z3, W2, padding=2, stride=1)
    z5 = ad.relu_op(z4)
    z6 = ad.avg_pool2d_op(z5, kernel_H=2, kernel_W=2, padding=0, stride=2)

    z6_flat = ad.array_reshape_op(z6, (-1, 7 * 7 * 64))
    y = ad.matmul_op(z6_flat, W3) + b3

    executor = ad.Executor([y], ctx=executor_ctx)

    rand = np.random.RandomState(seed=123)
    X_val = rand.normal(scale=0.1,
                        size=(batch_size, 1, 28, 28)).astype(np.float32)

    ath = executor.run(feed_dict={X: X_val})

    # Export the graph to ONNX, run the same input through onnxruntime,
    # and check that the two outputs agree.
    hx.hetu2onnx.export(executor, [X], [y], 'ath.onnx')
    sess = rt.InferenceSession("ath.onnx")
    input_name = sess.get_inputs()[0].name

    pre = sess.run(None, {input_name: X_val.astype(np.float32)})[0]

    np.testing.assert_allclose(ath[0].asnumpy(), pre, rtol=1e-2)
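The 7 * 7 * 64 flatten size follows from the hyperparameters above; a quick sanity sketch using the standard conv/pool output-size formula (not part of the original file):

H = 28
H = (H + 2 * 2 - 5) // 1 + 1   # conv1: 5x5 kernel, padding=2, stride=1 -> 28
H = (H - 2) // 2 + 1           # pool1: 2x2, stride=2 -> 14
H = (H + 2 * 2 - 5) // 1 + 1   # conv2: same hyperparameters -> 14
H = (H - 2) // 2 + 1           # pool2 -> 7
assert H * H * 64 == 7 * 7 * 64   # matches the reshape target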
Example #5
def conv_bn_relu(x, in_channel, out_channel, name):
    weight = init.random_normal(shape=(out_channel, in_channel, 3, 3),
                                stddev=0.1,
                                name=name + '_weight')
    bn_scale = init.random_normal(shape=(1, out_channel, 1, 1),
                                  stddev=0.1,
                                  name=name + '_bn_scale')
    bn_bias = init.random_normal(shape=(1, out_channel, 1, 1),
                                 stddev=0.1,
                                 name=name + '_bn_bias')

    conv = ad.conv2d_op(x, weight, padding=1, stride=1)
    bn = ad.batch_normalization_op(conv, bn_scale, bn_bias)
    act = ad.relu_op(bn)
    return act
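A minimal usage sketch (channel counts and names are illustrative): with a 3x3 kernel, padding=1 and stride=1 the block preserves spatial size, so it can be chained freely:

X = ad.Variable(name="X")    # expected NCHW input, e.g. (N, 3, H, W)
h = conv_bn_relu(X, in_channel=3, out_channel=64, name='block1')
h = conv_bn_relu(h, in_channel=64, out_channel=64, name='block2')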
Example #6
def conv_bn_relu_pool(x,
                      in_channel,
                      out_channel,
                      name,
                      with_relu=True,
                      with_pool=False):
    weight = init.random_normal(shape=(out_channel, in_channel, 3, 3),
                                stddev=0.1,
                                name=name + '_weight')
    bn_scale = init.random_normal(shape=(1, out_channel, 1, 1),
                                  stddev=0.1,
                                  name=name + '_bn_scale')
    bn_bias = init.random_normal(shape=(1, out_channel, 1, 1),
                                 stddev=0.1,
                                 name=name + '_bn_bias')
    x = ad.conv2d_op(x, weight, stride=1, padding=1)
    x = ad.batch_normalization_op(x, bn_scale, bn_bias)
    if with_relu:
        x = ad.relu_op(x)
    if with_pool:
        x = ad.max_pool2d_op(x, kernel_H=2, kernel_W=2, stride=2, padding=0)
    return x
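A hedged sketch of a VGG-style stage built from this helper (illustrative channel counts), pooling only after the last convolution of the stage:

h = conv_bn_relu_pool(x, 3, 64, name='stage1_conv1')
h = conv_bn_relu_pool(h, 64, 64, name='stage1_conv2', with_pool=True)   # 2x2 max-pool halves H and W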
Example #7
File: CNN.py Project: sj1104/Het
def conv_relu_avg(x, shape):
    weight = init.random_normal(shape=shape, stddev=0.1)
    x = ad.conv2d_op(x, weight, padding=2, stride=1)
    x = ad.relu_op(x)
    x = ad.avg_pool2d_op(x, kernel_H=2, kernel_W=2, padding=0, stride=2)
    return x
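Unlike the other helpers, this one takes the full filter shape (out_channel, in_channel, kH, kW) and leaves the weight unnamed. An illustrative call matching the 5x5, 1-to-32-channel convolution of Example #1:

h = conv_relu_avg(x, shape=(32, 1, 5, 5))   # conv (padding=2) -> ReLU -> 2x2 average pool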
Example #8
def conv2d(x, in_channel, out_channel, stride=1, padding=1, name=''):
    weight = init.random_normal(shape=(out_channel, in_channel, 3, 3),
                                stddev=0.1,
                                name=name + '_weight')
    x = ad.conv2d_op(x, weight, stride=stride, padding=padding)
    return x
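A minimal usage sketch (values illustrative): with the defaults (stride=1, padding=1) the 3x3 convolution preserves spatial size, while stride=2 halves it, e.g. for downsampling:

h = conv2d(x, in_channel=64, out_channel=128, stride=2, padding=1, name='down1')   # halves H and W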