Example #1
def test_caffe_model_forward_gradient(tmpdir):
    import caffe
    from caffe import layers as L

    # numpy and CaffeModel are module-level imports in the original test
    # module; CaffeModel is assumed to be foolbox's Caffe wrapper
    import numpy as np
    from foolbox.models import CaffeModel

    bounds = (0, 255)
    channels = num_classes = 1000

    net_spec = caffe.NetSpec()
    net_spec.data = L.Input(name="data",
                            shape=dict(dim=[1, num_classes, 5, 5]))
    net_spec.reduce_1 = L.Reduction(net_spec.data,
                                    reduction_param={
                                        "operation": 4,
                                        "axis": 3
                                    })
    net_spec.output = L.Reduction(net_spec.reduce_1,
                                  reduction_param={
                                      "operation": 4,
                                      "axis": 2
                                  })
    net_spec.label = L.Input(name="label", shape=dict(dim=[1]))
    net_spec.loss = L.SoftmaxWithLoss(net_spec.output, net_spec.label)
    wf = tmpdir.mkdir("test_models_caffe").join(
        "test_caffe_model_gradient_proto_{}.prototxt".format(num_classes))
    wf.write("force_backward: true\n" + str(net_spec.to_proto()))
    preprocessing = (
        np.arange(num_classes)[:, None, None],
        np.random.uniform(size=(channels, 5, 5)) + 1,
    )
    net = caffe.Net(str(wf), caffe.TEST)
    model = CaffeModel(net, bounds=bounds, preprocessing=preprocessing)

    epsilon = 1e-2

    np.random.seed(23)
    test_images = np.random.rand(5, channels, 5, 5).astype(np.float32)
    test_labels = [7] * 5

    # batch of 5 images and 5 labels, so use the batch variant of the API
    _, g1 = model.forward_and_gradient(test_images, test_labels)

    l1 = model._loss_fn(test_images - epsilon / 2 * g1, test_labels)
    l2 = model._loss_fn(test_images + epsilon / 2 * g1, test_labels)

    assert np.all(1e4 * (l2 - l1) > 1)

    # make sure that gradient is numerically correct
    np.testing.assert_array_almost_equal(
        1e4 * (l2 - l1),
        1e4 * epsilon *
        np.linalg.norm(g1.reshape(len(g1), -1, g1.shape[-1]), axis=(1, 2))**2,
        decimal=1,
    )
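
    # Added sketch (not part of the original test): the same finite-difference
    # check for a single sample, assuming the unbatched helpers
    # forward_and_gradient_one and _loss_fn accept one image and an int label.
    test_image = test_images[0]
    test_label = 7

    _, g_one = model.forward_and_gradient_one(test_image, test_label)

    l1_one = model._loss_fn(test_image - epsilon / 2 * g_one, test_label)
    l2_one = model._loss_fn(test_image + epsilon / 2 * g_one, test_label)

    # for a small epsilon, l2 - l1 should be close to epsilon * ||g||^2
    np.testing.assert_array_almost_equal(
        1e4 * (l2_one - l1_one),
        1e4 * epsilon * np.linalg.norm(g_one) ** 2,
        decimal=1,
    )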
Example #2
import pytest  # needed for the fixture decorator below


@pytest.fixture  # assumed: the decorator was stripped when the snippet was extracted
def bn_model_caffe(request, tmpdir):
    """Same as bn_model but with Caffe."""

    import caffe
    from caffe import layers as L

    from foolbox.models import CaffeModel  # assumed foolbox import, as in Example #1

    bounds = (0, 1)
    num_classes = channels = getattr(request, "param", 1000)

    net_spec = caffe.NetSpec()
    net_spec.data = L.Input(name="data",
                            shape=dict(dim=[1, channels, 5, 5]))
    net_spec.reduce_1 = L.Reduction(net_spec.data,
                                    reduction_param={"operation": 4,
                                                     "axis": 3})
    net_spec.output = L.Reduction(net_spec.reduce_1,
                                  reduction_param={"operation": 4,
                                                   "axis": 2})
    net_spec.label = L.Input(name="label", shape=dict(dim=[1]))
    net_spec.loss = L.SoftmaxWithLoss(net_spec.output, net_spec.label)
    wf = tmpdir.mkdir("test_models_caffe_fixture")\
               .join("test_caffe_{}.prototxt".format(num_classes))
    wf.write("force_backward: true\n" + str(net_spec.to_proto()))
    net = caffe.Net(str(wf), caffe.TEST)
    model = CaffeModel(net, bounds=bounds)
    return model
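
A test consumes the fixture by naming it as an argument; the sketch below is not part of the original snippet and assumes the default num_classes of 1000 and foolbox's forward_one returning a vector of class scores.

def test_bn_model_caffe_shapes(bn_model_caffe):
    # minimal usage sketch: pytest injects the model built by the fixture above
    import numpy as np

    x = np.random.rand(1000, 5, 5).astype(np.float32)  # CHW input; channels == num_classes == 1000
    scores = bn_model_caffe.forward_one(x)
    assert scores.shape == (1000,)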
Example #3
def create_fmodel(cls, cfg):
    # classmethod-style factory (extracted from a class); CaffeModel is
    # assumed to be foolbox's Caffe wrapper, as in the examples above
    model = cls(**cfg["cfg"])
    bounds = tuple(cfg.get("bounds", [0, 1]))
    preprocessing = tuple(cfg.get("preprocessing", [0, 255]))
    fmodel = CaffeModel(model.net,
                        bounds=bounds,
                        channel_axis=1,
                        preprocessing=preprocessing,
                        data_blob_name=cfg.get("data_blob_name", "data"),
                        label_blob_name=cfg.get("label_blob_name", "label"),
                        output_blob_name=cfg.get("output_blob_name", "output"))
    return fmodel
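
The factory simply forwards a plain config dict to CaffeModel's keyword arguments. A hypothetical cfg illustrating the expected keys (only the key names come from the code above; the nested "cfg" values and the wrapper class are invented for illustration):

cfg = {
    "cfg": {"prototxt": "deploy.prototxt", "weights": "net.caffemodel"},  # hypothetical kwargs for cls(**cfg["cfg"])
    "bounds": [0, 255],
    "preprocessing": [0, 255],
    "data_blob_name": "data",
    "label_blob_name": "label",
    "output_blob_name": "output",
}
# fmodel = SomeCaffeWrapper.create_fmodel(cfg)  # the wrapper's instances must expose .net (a caffe.Net)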
Example #4
def test_caffe_model_preprocessing_shape_change(tmpdir):
    import caffe
    from caffe import layers as L

    import numpy as np
    from foolbox.models import CaffeModel  # assumed foolbox import, as in Example #1

    bounds = (0, 255)
    channels = num_classes = 1000

    net_spec = caffe.NetSpec()
    net_spec.data = L.Input(name="data",
                            shape=dict(dim=[1, num_classes, 5, 5]))
    net_spec.reduce_1 = L.Reduction(net_spec.data,
                                    reduction_param={
                                        "operation": 4,
                                        "axis": 3
                                    })
    net_spec.output = L.Reduction(net_spec.reduce_1,
                                  reduction_param={
                                      "operation": 4,
                                      "axis": 2
                                  })
    net_spec.label = L.Input(name="label", shape=dict(dim=[1]))
    net_spec.loss = L.SoftmaxWithLoss(net_spec.output, net_spec.label)
    wf = tmpdir.mkdir("test_models_caffe")\
               .join("test_caffe_model_preprocessing_shape_change_{}.prototxt"
                     .format(num_classes))
    wf.write("force_backward: true\n" + str(net_spec.to_proto()))
    net = caffe.Net(str(wf), caffe.TEST)
    model1 = CaffeModel(net, bounds=bounds)

    def preprocessing2(x):
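        # preprocessing callable as used by CaffeModel here: return the
        # preprocessed input (transposed from channels-last to channels-first)
        # together with a backward function that maps gradients w.r.t. the
        # preprocessed input back to the original layout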
        if x.ndim == 3:
            x = np.transpose(x, axes=(2, 0, 1))
        elif x.ndim == 4:
            x = np.transpose(x, axes=(0, 3, 1, 2))

        def grad(dmdp):
            assert dmdp.ndim == 3
            dmdx = np.transpose(dmdp, axes=(1, 2, 0))
            return dmdx

        return x, grad

    model2 = CaffeModel(net, bounds=bounds, preprocessing=preprocessing2)

    np.random.seed(22)
    test_images_nhwc = np.random.rand(2, 5, 5, channels).astype(np.float32)
    test_images_nchw = np.transpose(test_images_nhwc, (0, 3, 1, 2))

    p1 = model1.forward(test_images_nchw)
    p2 = model2.forward(test_images_nhwc)

    assert np.all(p1 == p2)

    p1 = model1.forward_one(test_images_nchw[0])
    p2 = model2.forward_one(test_images_nhwc[0])

    assert np.all(p1 == p2)

    g1 = model1.gradient_one(test_images_nchw[0], 3)
    assert g1.ndim == 3
    g1 = np.transpose(g1, (1, 2, 0))
    g2 = model2.gradient_one(test_images_nhwc[0], 3)

    np.testing.assert_array_almost_equal(g1, g2)