def do_nhwc_conv(test_case, with_cuda, with_bias):
    """Check that a Conv2d + relu computation gives the same result in
    eager mode and in lazy (``nn.Graph``) mode.

    Args:
        test_case: the ``unittest.TestCase`` driving the assertion.
        with_cuda: if True, move both the input and the module to CUDA.
        with_bias: whether the Conv2d layer is built with a bias term.
    """
    x = flow.randn(2, 3, 4, 5)
    conv = flow.nn.Conv2d(3, 4, 2, 1, bias=with_bias)
    if with_cuda:
        x = x.cuda()
        conv.to("cuda")

    # Reference result computed eagerly; the conv module is shared with
    # the graph below so both paths use identical weights.
    eager_conv_x = flow.relu(conv(x)) + flow.relu(conv(x))

    class GraphToRun(flow.nn.Graph):
        def __init__(self):
            super().__init__()
            self.conv = conv

        def build(self, x):
            return flow.relu(self.conv(x)) + flow.relu(self.conv(x))

    graph_to_run = GraphToRun()
    lazy_conv_x = graph_to_run(x)
    # Eager and lazy execution must agree within float32 tolerance.
    # (Debug prints of the first 10 elements removed — they were leftover
    # troubleshooting output that pollutes the test log.)
    test_case.assertTrue(
        np.allclose(eager_conv_x.numpy(),
                    lazy_conv_x.numpy(),
                    rtol=1e-5,
                    atol=1e-5))
Example #2
0
def do_nhwc_bacth_norm(test_case, with_cuda):
    """Verify that BatchNorm2d + relu produces matching results when run
    eagerly and when traced through an ``nn.Graph``.

    Args:
        test_case: the ``unittest.TestCase`` driving the assertion.
        with_cuda: if True, run both the input and the module on CUDA.
    """
    inp = flow.randn(2, 3, 4, 5)
    norm_layer = flow.nn.BatchNorm2d(3)
    if with_cuda:
        inp = inp.cuda()
        norm_layer.to("cuda")

    # Eager reference path; the same module instance is reused inside
    # the graph so parameters and running stats are shared.
    eager_batch_norm_res = flow.relu(norm_layer(inp))

    class GraphToRun(flow.nn.Graph):
        def __init__(self):
            super().__init__()
            self.m = norm_layer

        def build(self, x):
            return flow.relu(self.m(x))

    lazy_batch_norm_res = GraphToRun()(inp)
    # Both execution modes must agree within float32 tolerance.
    test_case.assertTrue(
        np.allclose(
            eager_batch_norm_res.numpy(),
            lazy_batch_norm_res.numpy(),
            rtol=1e-5,
            atol=1e-5,
        ))
Example #3
0
def _relu_inplace(self):
    """In-place relu: apply the activation directly on this tensor."""
    result = flow.relu(self, inplace=True)
    return result
Example #4
0
def _relu(self, inplace=False):
    """Apply relu to this tensor.

    Args:
        inplace: when True, the activation mutates this tensor in place.
    """
    activated = flow.relu(self, inplace=inplace)
    return activated
Example #5
0
 def build(self, x):
     """Graph build step: relu over the wrapped module's output."""
     activated = self.m(x)
     return flow.relu(activated)
 def build(self, x):
     """Graph build step: sum of two relu-activated conv outputs."""
     conv_layer = self.conv
     return flow.relu(conv_layer(x)) + flow.relu(conv_layer(x))