Example #1
 def test_simple_model(self):
     model = model_helper.ModelHelper(name="mnist")
     data, label = brew.image_input(model, ["db"], ["data", "label"],
                                    order="NCHW",
                                    use_gpu_transform=False,
                                    is_test=0)
     with core.NameScope("conv1"):
         conv1 = brew.conv(model,
                           data,
                           'conv1',
                           dim_in=1,
                           dim_out=20,
                           kernel=5)
         # Image size: 24 x 24 -> 12 x 12
         pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
         # Image size: 12 x 12 -> 8 x 8
         conv2 = brew.conv(model,
                           pool1,
                           'conv2',
                           dim_in=20,
                           dim_out=100,
                           kernel=5)
         # Image size: 8 x 8 -> 4 x 4
         pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
     with core.NameScope("classifier"):
         # 100 * 4 * 4 is dim_out of the previous layer (100) multiplied by the 4 x 4 image size
         fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
         relu = brew.relu(model, fc3, fc3)
         pred = brew.fc(model, relu, 'pred', 500, 10)
         softmax = brew.softmax(model, pred, 'softmax')
         xent = model.LabelCrossEntropy([softmax, label], 'xent')
         # compute the expected loss
         loss = model.AveragedLoss(xent, "loss")
     model.net.RunAllOnGPU()
     model.param_init_net.RunAllOnGPU()
     model.AddGradientOperators([loss], skip=1)
     blob_name_tracker = {}
     graph = tb.model_to_graph_def(
         model,
         blob_name_tracker=blob_name_tracker,
         shapes={},
         show_simplified=False,
     )
     self.assertEqual(
         blob_name_tracker['GRADIENTS/conv1/conv1_b_grad'],
         'conv1/conv1_b_grad',
     )
     self.maxDiff = None
     # We can't guarantee the order in which they appear, so we sort
     # both before we compare them
     sep = "node {"
     expected = "\n".join(
         sorted(sep + "\n  " + part.strip()
                for part in EXPECTED_MNIST.strip().split(sep)
                if part.strip()))
     actual = "\n".join(
         sorted(sep + "\n  " + part.strip()
                for part in str(graph).strip().split(sep) if part.strip()))
     self.assertMultiLineEqual(actual, expected)
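The sort-before-compare trick used above (splitting the textual GraphDef on "node {" and sorting the pieces so node order does not matter) can be pulled out into a small standalone helper. A minimal sketch; the name sorted_node_blocks is hypothetical and not part of tensorboardX or Caffe2:

def sorted_node_blocks(graph_text):
    # Split a textual GraphDef into its "node { ... }" blocks, strip
    # surrounding whitespace, and join them in sorted order so two graphs
    # can be compared regardless of node emission order.
    sep = "node {"
    parts = (p.strip() for p in graph_text.strip().split(sep) if p.strip())
    return "\n".join(sorted(sep + "\n  " + p for p in parts))

# Usage inside a test, with graph and EXPECTED_MNIST as above:
#   self.assertMultiLineEqual(sorted_node_blocks(str(graph)),
#                             sorted_node_blocks(EXPECTED_MNIST))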
Example #2
    def test_simple_model(self):
        model = model_helper.ModelHelper(name="mnist")
        # how come those inputs don't break the forward pass =.=a
        workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
        workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int))

        with core.NameScope("conv1"):
            conv1 = brew.conv(model, "data", 'conv1', dim_in=1, dim_out=20, kernel=5)
            # Image size: 24 x 24 -> 12 x 12
            pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
            # Image size: 12 x 12 -> 8 x 8
            conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=100, kernel=5)
            # Image size: 8 x 8 -> 4 x 4
            pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
        with core.NameScope("classifier"):
            # 100 * 4 * 4 is dim_out of the previous layer (100) multiplied by the 4 x 4 image size
            fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
            relu = brew.relu(model, fc3, fc3)
            pred = brew.fc(model, relu, 'pred', 500, 10)
            softmax = brew.softmax(model, pred, 'softmax')
            xent = model.LabelCrossEntropy([softmax, "label"], 'xent')
            # compute the expected loss
            loss = model.AveragedLoss(xent, "loss")
        model.net.RunAllOnMKL()
        model.param_init_net.RunAllOnMKL()
        model.AddGradientOperators([loss], skip=1)
        blob_name_tracker = {}
        graph = tb.model_to_graph_def(
            model,
            blob_name_tracker=blob_name_tracker,
            shapes={},
            show_simplified=False,
        )
        #self.assertEqual(
        #    blob_name_tracker['GRADIENTS/conv1/conv1_b_grad'],
        #    'conv1/conv1_b_grad',
        #)
        self.maxDiff = None
        # We can't guarantee the order in which they appear, so we sort
        # both before we compare them
        with open('tests/expect/caffe_mnist.expect') as f:
            EXPECTED_MNIST = f.read()
        sep = "node {"
        expected = "\n".join(sorted(
            sep + "\n  " + part.strip()
            for part in EXPECTED_MNIST.strip().split(sep)
            if part.strip()
        ))
        actual = "\n".join(sorted(
            sep + "\n  " + part.strip()
            for part in str(graph).strip().split(sep)
            if part.strip()
        ))
        self.assertMultiLineEqual(actual, expected)
Example #3
    def test_simple_model(self):
        model = model_helper.ModelHelper(name="mnist")
        # how come those inputs don't break the forward pass =.=a
        workspace.FeedBlob("data",
                           np.random.randn(1, 3, 64, 64).astype(np.float32))
        workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int))

        with core.NameScope("conv1"):
            conv1 = brew.conv(model,
                              "data",
                              'conv1',
                              dim_in=1,
                              dim_out=20,
                              kernel=5)
            # Image size: 24 x 24 -> 12 x 12
            pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
            # Image size: 12 x 12 -> 8 x 8
            conv2 = brew.conv(model,
                              pool1,
                              'conv2',
                              dim_in=20,
                              dim_out=100,
                              kernel=5)
            # Image size: 8 x 8 -> 4 x 4
            pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
        with core.NameScope("classifier"):
            # 100 * 4 * 4 is dim_out of the previous layer (100) multiplied by the 4 x 4 image size
            fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
            relu = brew.relu(model, fc3, fc3)
            pred = brew.fc(model, relu, 'pred', 500, 10)
            softmax = brew.softmax(model, pred, 'softmax')
            xent = model.LabelCrossEntropy([softmax, "label"], 'xent')
            # compute the expected loss
            loss = model.AveragedLoss(xent, "loss")
        model.net.RunAllOnMKL()
        model.param_init_net.RunAllOnMKL()
        model.AddGradientOperators([loss], skip=1)
        blob_name_tracker = {}
        graph = tb.model_to_graph_def(
            model,
            blob_name_tracker=blob_name_tracker,
            shapes={},
            show_simplified=False,
        )

        compare_proto(graph, self)
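Example #3 hands the comparison off to a compare_proto helper that is not shown in these snippets. A rough sketch of what such a helper could look like, combining the expectation-file loading from Examples #2 and #4 with the node-sorting comparison from Example #1; the file-naming scheme and the entire body are assumptions, not the actual test utility:

import os

def compare_proto(graph, test_case, expect_dir='tests/expect'):
    # Hypothetical reimplementation: derive the expectation file from the
    # running test's method name, then compare the exported graph against it
    # with node order normalized away.
    name = test_case.id().split('.')[-1]
    with open(os.path.join(expect_dir, name + '.expect')) as f:
        expected_text = f.read()

    sep = "node {"

    def normalize(text):
        return "\n".join(sorted(
            sep + "\n  " + part.strip()
            for part in text.strip().split(sep)
            if part.strip()
        ))

    test_case.maxDiff = None
    test_case.assertMultiLineEqual(normalize(str(graph)),
                                   normalize(expected_text))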
Example #4
 def test_simple_cnnmodel(self):
     model = cnn.CNNModelHelper("NCHW", name="overfeat")
     workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
     workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int))
     with core.NameScope("conv1"):
         conv1 = model.Conv("data", "conv1", 3, 96, 11, stride=4)
         relu1 = model.Relu(conv1, conv1)
         pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
     with core.NameScope("classifier"):
         fc = model.FC(pool1, "fc", 4096, 1000)
         pred = model.Softmax(fc, "pred")
         xent = model.LabelCrossEntropy([pred, "label"], "xent")
         loss = model.AveragedLoss(xent, "loss")
     model.net.RunAllOnMKL()
     model.param_init_net.RunAllOnMKL()
     model.AddGradientOperators([loss], skip=1)
     blob_name_tracker = {}
     graph = tb.model_to_graph_def(
         model,
         blob_name_tracker=blob_name_tracker,
         shapes={},
         show_simplified=False,
     )
     #self.assertEqual(
     #    blob_name_tracker['GRADIENTS/conv1/conv1_b_grad'],
     #    'conv1/conv1_b_grad',
     #)
     self.maxDiff = None
     # We can't guarantee the order in which they appear, so we sort
     # both before we compare them
     with open('tests/expect/caffe_overfeat.expect') as f:
         EXPECTED_CNN = f.read()
     sep = "node {"
     expected = "\n".join(sorted(
         sep + "\n  " + part.strip()
         for part in EXPECTED_CNN.strip().split(sep)
         if part.strip()
     ))
     actual = "\n".join(sorted(
         sep + "\n  " + part.strip()
         for part in str(graph).strip().split(sep)
         if part.strip()
     ))
     self.assertMultiLineEqual(actual, expected)
Example #5
 def test_simple_cnnmodel(self):
     model = cnn.CNNModelHelper("NCHW", name="overfeat")
     data, label = model.ImageInput(["db"], ["data", "label"], is_test=0)
     with core.NameScope("conv1"):
         conv1 = model.Conv(data, "conv1", 3, 96, 11, stride=4)
         relu1 = model.Relu(conv1, conv1)
         pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
     with core.NameScope("classifier"):
         fc = model.FC(pool1, "fc", 4096, 1000)
         pred = model.Softmax(fc, "pred")
         xent = model.LabelCrossEntropy([pred, label], "xent")
         loss = model.AveragedLoss(xent, "loss")
     model.net.RunAllOnGPU()
     model.param_init_net.RunAllOnGPU()
     model.AddGradientOperators([loss], skip=1)
     with SummaryWriter(filename_suffix='.test') as writer:
         writer.add_graph(model)
     blob_name_tracker = {}
     graph = tb.model_to_graph_def(
         model,
         blob_name_tracker=blob_name_tracker,
         shapes={},
         show_simplified=False,
     )
     self.assertEqual(
         blob_name_tracker['GRADIENTS/conv1/conv1_b_grad'],
         'conv1/conv1_b_grad',
     )
     self.maxDiff = None
     # We can't guarantee the order in which they appear, so we sort
     # both before we compare them
     sep = "node {"
     expected = "\n".join(sorted(
         sep + "\n  " + part.strip()
         for part in EXPECTED_CNN.strip().split(sep)
         if part.strip()
     ))
     actual = "\n".join(sorted(
         sep + "\n  " + part.strip()
         for part in str(graph).strip().split(sep)
         if part.strip()
     ))
     self.assertMultiLineEqual(actual, expected)
Example #6
    def test_simple_cnnmodel(self):
        model = cnn.CNNModelHelper("NCHW", name="overfeat")
        workspace.FeedBlob("data",
                           np.random.randn(1, 3, 64, 64).astype(np.float32))
        workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int))
        with core.NameScope("conv1"):
            conv1 = model.Conv("data", "conv1", 3, 96, 11, stride=4)
            relu1 = model.Relu(conv1, conv1)
            pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
        with core.NameScope("classifier"):
            fc = model.FC(pool1, "fc", 4096, 1000)
            pred = model.Softmax(fc, "pred")
            xent = model.LabelCrossEntropy([pred, "label"], "xent")
            loss = model.AveragedLoss(xent, "loss")

        blob_name_tracker = {}
        graph = tb.model_to_graph_def(
            model,
            blob_name_tracker=blob_name_tracker,
            shapes={},
            show_simplified=False,
        )

        compare_proto(graph, self)
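Outside the test harness, the same export can be inspected by hand: model_to_graph_def returns a protobuf GraphDef whose text form is what the .expect files contain, so dumping it to disk is enough for a quick diff. A small sketch; which module tb refers to is not shown in these snippets (it is assumed to expose model_to_graph_def exactly as used above), and the output filename is arbitrary:

# model built with brew/ModelHelper or cnn.CNNModelHelper as in the examples
graph = tb.model_to_graph_def(
    model,
    blob_name_tracker={},
    shapes={},
    show_simplified=False,
)
with open('mnist_graph.pbtxt', 'w') as f:
    f.write(str(graph))  # textual protobuf, easy to diff against an .expect file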