Example #1
    def test_table_label(self):
        """
        Test a table (multiple tensors) as the label in a Sample.
        For testing purposes only.
        """
        def gen_rand_sample():
            features1 = np.random.uniform(0, 1, 3)
            features2 = np.random.uniform(0, 1, 3)
            label = np.array((2 * (features1 + features2)).sum() + 0.4)
            return Sample.from_ndarray([features1, features2], [label, label])

        training_data = self.sc.parallelize(range(0, 50)).map(
            lambda i: gen_rand_sample())

        model_test = Sequential()
        branches = ParallelTable()
        branch1 = Sequential().add(Linear(3, 1)).add(Tanh())
        branch2 = Sequential().add(Linear(3, 1)).add(ReLU())
        branches.add(branch1).add(branch2)
        model_test.add(branches)

        optimizer = Optimizer.create(
            model=model_test,
            training_set=training_data,
            criterion=MarginRankingCriterion(),
            optim_method=SGD(),
            end_trigger=MaxEpoch(5),
            batch_size=32)
        optimizer.optimize()
Example #3
    def test_predict(self):
        np.random.seed(100)
        total_length = 6
        features = np.random.uniform(0, 1, (total_length, 2))
        label = features.sum() + 0.4
        predict_data = self.sc.parallelize(range(0, total_length)).map(
            lambda i: Sample.from_ndarray(features[i], label))
        model = Linear(2, 1).set_init_method(Xavier(), Zeros()) \
            .set_name("linear1").set_seed(1234).reset()
        predict_result = model.predict(predict_data)
        p = predict_result.take(6)
        ground_label = np.array([[-0.47596836], [-0.37598032], [-0.00492062],
                                 [-0.5906958], [-0.12307882], [-0.77907401]],
                                dtype="float32")
        for i in range(0, total_length):
            assert_allclose(p[i], ground_label[i], atol=1e-6, rtol=0)

        predict_result_with_batch = model.predict(features=predict_data,
                                                  batch_size=4)
        p_with_batch = predict_result_with_batch.take(6)
        for i in range(0, total_length):
            assert_allclose(p_with_batch[i],
                            ground_label[i],
                            atol=1e-6,
                            rtol=0)

        predict_class = model.predict_class(predict_data)
        predict_labels = predict_class.take(6)
        for i in range(0, total_length):
            assert predict_labels[i] == 1
Example #4
    def test_local_predict_multiple_input(self):
        l1 = Linear(3, 2)()
        l2 = Linear(3, 3)()
        joinTable = JoinTable(dimension=1, n_input_dims=1)([l1, l2])
        model = Model(inputs=[l1, l2], outputs=joinTable)
        result = model.predict_local([np.ones([4, 3]), np.ones([4, 3])])
        assert result.shape == (4, 5)
        result2 = model.predict_class([np.ones([4, 3]), np.ones([4, 3])])
        assert result2.shape == (4, )

        result3 = model.predict_local([
            JTensor.from_ndarray(np.ones([4, 3])),
            JTensor.from_ndarray(np.ones([4, 3]))
        ])
        assert result3.shape == (4, 5)
        result4 = model.predict_class([
            JTensor.from_ndarray(np.ones([4, 3])),
            JTensor.from_ndarray(np.ones([4, 3]))
        ])
        assert result4.shape == (4, )
        result5 = model.predict_local([
            JTensor.from_ndarray(np.ones([4, 3])),
            JTensor.from_ndarray(np.ones([4, 3]))
        ],
                                      batch_size=2)
        assert result5.shape == (4, 5)
Example #5
    def test_init_method(self):
        initializers = [
            Zeros(),
            Ones(),
            ConstInitMethod(5),
            RandomUniform(-1, 1),
            RandomNormal(0, 1),
            None
        ]
        special_initializers = [
            MsraFiller(False),
            Xavier(),
            RandomUniform(),
        ]

        layers = [
            SpatialConvolution(6, 12, 5, 5),
            SpatialShareConvolution(1, 1, 1, 1),
            LookupTable(1, 1, 1e-5, 1e-5, 1e-5, True),
            Bilinear(1, 1, 1, True),
            Cosine(2, 3),
            SpatialFullConvolution(1, 1, 1, 1),
            Add(1),
            Linear(100, 10),
            CMul([1, 2]),
            Mul(),
            PReLU(1),
            Euclidean(1, 1, True),
            SpatialDilatedConvolution(1, 1, 1, 1),
            SpatialBatchNormalization(1),
            BatchNormalization(1, 1e-5, 1e-5, True),
        ]

        special_layers = [
            SpatialConvolution(6, 12, 5, 5),
            SpatialShareConvolution(1, 1, 1, 1),
            Cosine(2, 3),
            SpatialFullConvolution(1, 1, 1, 1),
            Add(1),
            Linear(100, 10),
            CMul([1, 2]),
            Mul(),
            PReLU(1),
            Euclidean(1, 1, True),
            SpatialDilatedConvolution(1, 1, 1, 1),
            SpatialBatchNormalization(1),
            BatchNormalization(1, 1e-5, 1e-5, True),
        ]
        for layer in layers:
            for init1 in initializers:
                for init2 in initializers:
                    layer.set_init_method(init1, init2)

        for layer in special_layers:
            for init1 in special_initializers:
                for init2 in special_initializers:
                    layer.set_init_method(init1, init2)

        SpatialFullConvolution(1, 1, 1, 1).set_init_method(BilinearFiller(), Zeros())
Example #6
 def test_save_graph_topology(self):
     fc1 = Linear(4, 2)()
     fc2 = Linear(4, 2)()
     cadd = CAddTable()([fc1, fc2])
     output1 = ReLU()(cadd)
     output2 = Threshold(10.0)(cadd)
     model = Model([fc1, fc2], [output1, output2])
     model.save_graph_topology(tempfile.mkdtemp())
Example #7
 def test_get_node(self):
     fc1 = Linear(4, 2)()
     fc2 = Linear(4, 2)()
     fc1.element().set_name("fc1")
     cadd = CAddTable()([fc1, fc2])
     output1 = ReLU()(cadd)
     model = Model([fc1, fc2], [output1])
     res = model.node("fc1")
     assert res.element().name() == "fc1"
 def test_set_seed(self):
     w_init = Xavier()
     b_init = Zeros()
     l1 = Linear(10, 20).set_init_method(w_init, b_init).set_name("linear1").set_seed(
         1234).reset()  # noqa
     l2 = Linear(10, 20).set_init_method(w_init, b_init).set_name("linear2").set_seed(
         1234).reset()  # noqa
     p1 = l1.parameters()
     p2 = l2.parameters()
     assert (p1["linear1"]["weight"] == p2["linear2"]["weight"]).all()  # noqa
Example #11
    def test_model_broadcast(self):

        init_executor_gateway(self.sc)
        model = Linear(3, 2)
        broadcasted = broadcast_model(self.sc, model)
        input_data = np.random.rand(3)
        output = self.sc.parallelize([input_data], 1)\
            .map(lambda x: broadcasted.value.forward(x)).first()
        expected = model.forward(input_data)

        assert_allclose(output, expected)
Example #12
 def test_graph_preprocessor(self):
     fc1 = Linear(4, 2)()
     fc2 = Linear(4, 2)()
     cadd = CAddTable()([fc1, fc2])
     preprocessor = Model([fc1, fc2], [cadd])
     relu = ReLU()()
     fc3 = Linear(2, 1)(relu)
     trainable = Model([relu], [fc3])
     model = Model(preprocessor, trainable)
     model.forward([np.array([0.1, 0.2, -0.3, -0.4]), np.array([0.5, 0.4, -0.2, -0.1])])
     model.backward([np.array([0.1, 0.2, -0.3, -0.4]), np.array([0.5, 0.4, -0.2, -0.1])],
                    np.array([1.0]))
Example #13
 def _create_cnn_model():
     model = Sequential()
     model.add(SpatialConvolution(3, 1, 5, 5))
     model.add(View([1 * 220 * 220]))
     model.add(Linear(1 * 220 * 220, 20))
     model.add(LogSoftMax())
     return model
Example #14
    def test_local_optimizer_predict(self):
        feature_num = 2
        data_len = 1000
        batch_size = 32
        epoch_num = 500

        X_ = np.random.uniform(0, 1, (data_len, feature_num))
        y_ = (2 * X_).sum(1) + 0.4
        model = Sequential()
        l1 = Linear(feature_num, 1)
        model.add(l1)

        localOptimizer = Optimizer.create(
            model=model,
            training_set=(X_, y_),
            criterion=MSECriterion(),
            optim_method=SGD(learningrate=1e-2),
            end_trigger=MaxEpoch(epoch_num),
            batch_size=batch_size)

        trained_model = localOptimizer.optimize()
        w = trained_model.get_weights()
        assert_allclose(w[0], np.array([2, 2]).reshape([1, 2]), rtol=1e-1)
        assert_allclose(w[1], np.array([0.4]), rtol=1e-1)

        predict_result = trained_model.predict_local(X_)
        assert_allclose(y_, predict_result.reshape((data_len,)), rtol=1e-1)
Example #15
    def test_load_optim_method(self):
        FEATURES_DIM = 2
        data_len = 100
        batch_size = 32
        epoch_num = 5

        def gen_rand_sample():
            features = np.random.uniform(0, 1, (FEATURES_DIM))
            label = (2 * features).sum() + 0.4
            return Sample.from_ndarray(features, label)

        trainingData = self.sc.parallelize(range(0, data_len)).map(lambda i: gen_rand_sample())
        model = Sequential()
        l1 = Linear(FEATURES_DIM, 1).set_init_method(Xavier(), Zeros()).set_name("linear1")
        model.add(l1)

        # note: 'leaningrate_schedule' (sic) is the parameter's actual spelling
        # in the BigDL Python SGD API
        sgd = SGD(learningrate=0.01, learningrate_decay=0.0002, weightdecay=0.0,
                  momentum=0.0, dampening=0.0, nesterov=False,
                  leaningrate_schedule=Poly(0.5, int((data_len / batch_size) * epoch_num)))

        tmp_path = tempfile.mktemp()
        sgd.save(tmp_path, True)
        optim_method = OptimMethod.load(tmp_path)
        assert optim_method.learningRate() == sgd.value.learningRate()
        assert optim_method.momentum() == sgd.value.momentum()
        assert optim_method.nesterov() == sgd.value.nesterov()

        optimizer = Optimizer(
            model=model,
            training_rdd=trainingData,
            criterion=MSECriterion(),
            optim_method=optim_method,
            end_trigger=MaxEpoch(epoch_num),
            batch_size=batch_size)
        optimizer.optimize()
Example #16
    def test_multiple_input(self):
        """
        Test training on samples that carry several tensors as features,
        using a sequential model with multiple inputs.
        """
        FEATURES_DIM = 2
        data_len = 100
        batch_size = 32
        epoch_num = 5

        def gen_rand_sample():
            features1 = np.random.uniform(0, 1, (FEATURES_DIM))
            features2 = np.random.uniform(0, 1, (FEATURES_DIM))
            label = np.array((2 * (features1 + features2)).sum() + 0.4)
            return Sample.from_ndarray([features1, features2], label)

        trainingData = self.sc.parallelize(range(
            0, data_len)).map(lambda i: gen_rand_sample())

        model_test = Sequential()
        branches = ParallelTable()
        branch1 = Sequential().add(Linear(FEATURES_DIM, 1)).add(ReLU())
        branch2 = Sequential().add(Linear(FEATURES_DIM, 1)).add(ReLU())
        branches.add(branch1).add(branch2)
        model_test.add(branches).add(CAddTable())

        optim_method = SGD(learningrate=0.01,
                           learningrate_decay=0.0002,
                           weightdecay=0.0,
                           momentum=0.0,
                           dampening=0.0,
                           nesterov=False,
                           leaningrate_schedule=Poly(
                               0.5, int((data_len / batch_size) * epoch_num)))
        optimizer = Optimizer.create(model=model_test,
                                     training_set=trainingData,
                                     criterion=MSECriterion(),
                                     optim_method=optim_method,
                                     end_trigger=MaxEpoch(epoch_num),
                                     batch_size=batch_size)
        optimizer.set_validation(batch_size=batch_size,
                                 val_rdd=trainingData,
                                 trigger=EveryEpoch(),
                                 val_method=[Top1Accuracy()])

        optimizer.optimize()
Example #17
 def test_set_input_output_format(self):
     input1 = Input()
     lstm1 = Recurrent().add(LSTM(128, 128))(input1)
     fc1 = Linear(128, 10)
     t1 = TimeDistributed(fc1)(lstm1)
     model = Model(inputs=input1, outputs=t1)
     model.set_input_formats([4])
     model.set_output_formats([27])
 def test_predict(self):
     np.random.seed(100)
     total_length = 6
     features = np.random.uniform(0, 1, (total_length, 2))
     label = features.sum() + 0.4
     predict_data = self.sc.parallelize(range(0, total_length)).map(
         lambda i: Sample.from_ndarray(features[i], label))
     model = Linear(2, 1,
                    "Xavier").set_name("linear1").set_seed(1234).reset()
     predict_result = model.predict(predict_data)
     p = predict_result.take(6)
     ground_label = np.array([[-0.47596836], [-0.37598032], [-0.00492062],
                              [-0.5906958], [-0.12307882], [-0.77907401]],
                             dtype="float32")
     for i in range(0, total_length):
         self.assertTrue(
             np.allclose(p[i], ground_label[i], atol=1e-6, rtol=0))
Example #19
 def test_load_model_proto(self):
     fc1 = Linear(4, 2)
     fc1.set_weights([np.ones((4, 2)), np.ones((2, ))])
     tmp_path = tempfile.mktemp()
     fc1.saveModel(tmp_path, None, True)
     fc1_loaded = Model.loadModel(tmp_path)
     assert_allclose(fc1_loaded.get_weights()[0], fc1.get_weights()[0])
Example #20
 def test_load_sequential_of(self):
     fc1 = Linear(4, 2)
     model = Sequential()
     model.add(fc1)
     tmp_path = tempfile.mktemp()
     model.save(tmp_path, True)
     model_loaded = Model.load(tmp_path)
     assert "Sequential" in str(type(model_loaded))
     assert len(model_loaded.layers) == 1
Example #21
 def test_load_model_of(self):
     input = Input()
     fc1 = Linear(4, 2)(input)
     model = Model(input, fc1)
     tmp_path = tempfile.mktemp()
     model.save(tmp_path, True)
     model_loaded = Model.load(tmp_path)
     assert "Model" in str(type(model_loaded))
     assert len(model_loaded.layers) == 2
 def test_load_model(self):
     fc1 = Linear(4, 2)
     fc1.set_weights([np.ones((4, 2)), np.ones((2, ))])
     tmp_path = tempfile.mktemp()
     fc1.save(tmp_path, True)
     fc1_loaded = Model.load(tmp_path)
     self.assertTrue(np.allclose(fc1_loaded.get_weights()[0],
                                 fc1.get_weights()[0]))
Example #23
    def test_forward_multiple(self):
        from bigdl.nn.layer import Linear
        rng = RNG()
        rng.set_seed(100)

        input = [rng.uniform(0.0, 0.1, [2]),
                 rng.uniform(0.0, 0.1, [2]) + 0.2]

        grad_output = [rng.uniform(0.0, 0.1, [3]),
                       rng.uniform(0.0, 0.1, [3]) + 0.2]

        linear1 = Linear(2, 3)
        linear2 = Linear(2, 3)

        module = ParallelTable()
        module.add(linear1)
        module.add(linear2)
        module.forward(input)
        module.backward(input, grad_output)
Example #24
 def test_create_node(self):
     import numpy as np
     fc1 = Linear(4, 2)()
     fc2 = Linear(4, 2)()
     cadd = CAddTable()([fc1, fc2])
     output1 = ReLU()(cadd)
     model = Model([fc1, fc2], [output1])
     fc1.element().set_weights([np.ones((4, 2)), np.ones((2,))])
     fc2.element().set_weights([np.ones((4, 2)), np.ones((2,))])
     output = model.forward([np.array([0.1, 0.2, -0.3, -0.4]),
                             np.array([0.5, 0.4, -0.2, -0.1])])
     assert_allclose(output,
                     np.array([2.2, 2.2]))
Example #25
 def test_local_predict_class(self):
     feature_num = 2
     data_len = 3
     X_ = np.random.uniform(-1, 1, (data_len, feature_num))
     model = Sequential()
     l1 = Linear(feature_num, 1)
     model.add(l1)
     model.add(Sigmoid())
     model.set_seed(1234).reset()
     predict_result = model.predict_class(X_)
     assert_array_equal(predict_result, np.ones([3]))
Example #26
    def test_forward_backward(self):
        from bigdl.nn.layer import Linear
        rng = RNG()
        rng.set_seed(100)

        linear = Linear(4, 5)
        input = rng.uniform(0.0, 1.0, [4])
        output = linear.forward(input)
        assert_allclose(output,
                        np.array([0.41366524,
                                  0.009532653,
                                  -0.677581,
                                  0.07945433,
                                  -0.5742568]),
                        atol=1e-6, rtol=0)
        mse = MSECriterion()
        target = rng.uniform(0.0, 1.0, [5])
        loss = mse.forward(output, target)
        print("loss: " + str(loss))
        grad_output = mse.backward(output, rng.uniform(0.0, 1.0, [5]))
        l_grad_output = linear.backward(input, grad_output)
 def test_set_seed(self):
     w_init = Xavier()
     b_init = Zeros()
     l1 = Linear(10, 20).set_init_method(w_init, b_init).set_name("linear1").set_seed(1234).reset()  # noqa
     l2 = Linear(10, 20).set_init_method(w_init, b_init).set_name("linear2").set_seed(1234).reset()  # noqa
     p1 = l1.parameters()
     p2 = l2.parameters()
     self.assertTrue((p1["linear1"]["weight"] == p2["linear2"]["weight"]).all())  # noqa
Example #30
    def test_tf_load(self):
        linear = Linear(10, 2)()
        sigmoid = Sigmoid()(linear)
        softmax = SoftMax().set_name("output")(sigmoid)
        model = BModel(linear, softmax)
        input = np.random.random((4, 10))

        tmp_path = create_tmp_path() + "/model.pb"

        model.save_tensorflow([("input", [4, 10])], tmp_path)

        model_reloaded = Net.load_tf(tmp_path, ["input"], ["output"])
        expected_output = model.forward(input)
        output = model_reloaded.forward(input)
        self.assert_allclose(output, expected_output)
 def test_set_seed(self):
     l1 = Linear(
         10, 20,
         "Xavier").set_name("linear1").set_seed(1234).reset()  # noqa
     l2 = Linear(
         10, 20,
         "Xavier").set_name("linear2").set_seed(1234).reset()  # noqa
     p1 = l1.parameters()
     p2 = l2.parameters()
     self.assertTrue(
         (p1["linear1"]["weight"] == p2["linear2"]["weight"]).all())  # noqa
Example #32
    def test_train_DataSet(self):
        batch_size = 8
        epoch_num = 5
        images = []
        labels = []
        for i in range(0, 8):
            features = np.random.uniform(0, 1, (200, 200, 3))
            label = np.array([2])
            images.append(features)
            labels.append(label)

        image_frame = DistributedImageFrame(self.sc.parallelize(images),
                                            self.sc.parallelize(labels))

        transformer = Pipeline([
            BytesToMat(),
            Resize(256, 256),
            CenterCrop(224, 224),
            ChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
            MatToTensor(),
            ImageFrameToSample(target_keys=['label'])
        ])
        data_set = DataSet.image_frame(image_frame).transform(transformer)

        model = Sequential()
        model.add(SpatialConvolution(3, 1, 5, 5))
        model.add(View([1 * 220 * 220]))
        model.add(Linear(1 * 220 * 220, 20))
        model.add(LogSoftMax())
        optim_method = SGD(learningrate=0.01)
        optimizer = Optimizer.create(model=model,
                                     training_set=data_set,
                                     criterion=ClassNLLCriterion(),
                                     optim_method=optim_method,
                                     end_trigger=MaxEpoch(epoch_num),
                                     batch_size=batch_size)
        optimizer.set_validation(batch_size=batch_size,
                                 val_rdd=data_set,
                                 trigger=EveryEpoch(),
                                 val_method=[Top1Accuracy()])

        trained_model = optimizer.optimize()

        predict_result = trained_model.predict_image(
            image_frame.transform(transformer))
        assert_allclose(predict_result.get_predict().count(), 8)
Example #33
 def test_graph_backward(self):
     fc1 = Linear(4, 2)()
     fc2 = Linear(4, 2)()
     cadd = CAddTable()([fc1, fc2])
     output1 = ReLU()(cadd)
     output2 = Threshold(10.0)(cadd)
     model = Model([fc1, fc2], [output1, output2])
     fc1.element().set_weights([np.ones((4, 2)), np.ones((2,))])
     fc2.element().set_weights([np.ones((4, 2)) * 2, np.ones((2,)) * 2])
     output = model.forward([np.array([0.1, 0.2, -0.3, -0.4]),
                             np.array([0.5, 0.4, -0.2, -0.1])])
     gradInput = model.backward([np.array([0.1, 0.2, -0.3, -0.4]),
                                 np.array([0.5, 0.4, -0.2, -0.1])],
                                [np.array([1.0, 2.0]),
                                 np.array([3.0, 4.0])])
     assert_allclose(gradInput[0],
                     np.array([3.0, 3.0, 3.0, 3.0]))
     assert_allclose(gradInput[1],
                     np.array([6.0, 6.0, 6.0, 6.0]))
Example #36
# load pretrained caffe model
preTrained_model = Net.load_caffe(model_def_path, model_weight_path)

# create a new model by removing the layers after pool5/drop_7x7_s1
part_model = preTrained_model.new_graph(["pool5/drop_7x7_s1"])

# optionally freeze the layers from the input up to pool4/3x3_s2 (inclusive)
# part_model.freeze_up_to(["pool4/3x3_s2"])


from bigdl.nn.layer import Sequential, Linear, View, LogSoftMax
from bigdl.nn.criterion import CrossEntropyCriterion

# add a new linear layer with numClass outputs (6 in this example)
scene_network = Sequential().add(part_model).add(View([1024])) \
    .add(Linear(1024, 6)).add(LogSoftMax())

transformer = ChainedPreprocessing(
    [RowToImageFeature(), ImageResize(256, 256), ImageCenterCrop(224, 224),
     ImageChannelNormalize(123.0, 117.0, 104.0), ImageMatToTensor(), ImageFeatureToTensor()])

classifier = NNClassifier(scene_network, CrossEntropyCriterion(), transformer) \
    .setLearningRate(0.001).setLearningRateDecay(1e-3).setBatchSize(20) \
    .setMaxEpoch(2).setFeaturesCol("image").setCachingSample(False)

# train the model
scene_classification_model = classifier.fit(trainingDF)
print("Finished training")


# evaluate the model
print("Start evaluation:")
predictionDF = scene_classification_model.transform(validationDF).cache()
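
A minimal sketch of the evaluation step this example leaves open, assuming validationDF carries a numeric "label" column and that NNClassifier wrote its output to the default "prediction" column (both are assumptions, not part of the original snippet):

# hedged sketch: score the cached predictions with Spark ML's evaluator;
# the "label" and "prediction" column names are assumed defaults
from pyspark.ml.evaluation import MulticlassClassificationEvaluator

evaluator = MulticlassClassificationEvaluator(
    labelCol="label", predictionCol="prediction", metricName="accuracy")
print("Validation accuracy: %g" % evaluator.evaluate(predictionDF))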
Example #38
avg_pool = end_points['Mixed_3c']
export_tf(sess,
          "file:///home/hduser/slim/tfnet/",
          inputs=[images],
          outputs=[avg_pool])
from zoo.pipeline.api.net import TFNet
amodel = TFNet.from_export_folder("file:///home/hduser/slim/tfnet/")
from bigdl.nn.layer import (Sequential, Transpose, Contiguous, Linear, ReLU,
                            SoftMax, Reshape, View, MulConstant,
                            SpatialAveragePooling)
full_model = Sequential()
full_model.add(Transpose([(2, 4), (2, 3)]))
scalar = 1. / 255
full_model.add(MulConstant(scalar))
full_model.add(Contiguous())
full_model.add(amodel)
full_model.add(View([1024]))
full_model.add(Linear(1024, 5))
import re
from bigdl.nn.criterion import CrossEntropyCriterion
from pyspark import SparkConf
from pyspark.ml import Pipeline
from pyspark.sql import SQLContext
from pyspark.sql.functions import col, udf
from pyspark.sql.types import DoubleType, StringType
from zoo.common.nncontext import *
from zoo.feature.image import *
from zoo.pipeline.api.keras.layers import Dense, Input, Flatten
from zoo.pipeline.api.keras.models import *
from zoo.pipeline.api.net import *
from zoo.pipeline.nnframes import *
image_path = "hdfs:///project_data/pets/train_images/*"
csv_path = "hdfs:///project_data/pets/train/train.csv"
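
The snippet stops after defining the input paths. A rough sketch of the training step it sets up, mirroring Example #36 (the CSV schema, column names, and label join below are assumptions for illustration, not part of the original):

# hypothetical continuation; column names and the label join are assumptions
from zoo.feature.common import ChainedPreprocessing

sc = init_nncontext("pet classification")
imageDF = NNImageReader.readImages(image_path, sc)
labelDF = SQLContext(sc).read.csv(csv_path, header=True)  # assumed: id -> label

# full_model already rescales pixels by 1/255 (MulConstant above), so the
# preprocessing here deliberately skips channel normalization
transformer = ChainedPreprocessing(
    [RowToImageFeature(), ImageResize(256, 256), ImageCenterCrop(224, 224),
     ImageMatToTensor(), ImageFeatureToTensor()])

classifier = NNClassifier(full_model, CrossEntropyCriterion(), transformer) \
    .setLearningRate(0.001).setBatchSize(20).setMaxEpoch(2) \
    .setFeaturesCol("image")
# trained_model = classifier.fit(train_df)  # train_df: imageDF joined with labels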
Example #39
    def test_simple_flow(self):
        FEATURES_DIM = 2
        data_len = 100
        batch_size = 32
        epoch_num = 5

        def gen_rand_sample():
            features = np.random.uniform(0, 1, (FEATURES_DIM))
            label = np.array((2 * features).sum() + 0.4)
            return Sample.from_ndarray(features, label)

        trainingData = self.sc.parallelize(range(0, data_len)).map(
            lambda i: gen_rand_sample())

        model_test = Sequential()
        l1_test = Linear(FEATURES_DIM, 1).set_init_method(Xavier(), Zeros()) \
            .set_name("linear1_test")
        assert "linear1_test" == l1_test.name()
        model_test.add(l1_test)
        model_test.add(Sigmoid())

        model = Sequential()
        l1 = Linear(FEATURES_DIM, 1).set_init_method(Xavier(), Zeros()).set_name("linear1")
        assert "linear1" == l1.name()
        model.add(l1)

        optim_method = SGD(learningrate=0.01, learningrate_decay=0.0002, weightdecay=0.0,
                           momentum=0.0, dampening=0.0, nesterov=False,
                           leaningrate_schedule=Poly(0.5, int((data_len / batch_size) * epoch_num)))
        optimizer = Optimizer.create(
            model=model_test,
            training_set=trainingData,
            criterion=MSECriterion(),
            optim_method=optim_method,
            end_trigger=MaxEpoch(epoch_num),
            batch_size=batch_size)
        optimizer.set_validation(
            batch_size=batch_size,
            val_rdd=trainingData,
            trigger=EveryEpoch(),
            val_method=[Top1Accuracy()]
        )

        optimizer.optimize()

        optimizer.set_model(model=model)
        tmp_dir = tempfile.mkdtemp()
        optimizer.set_checkpoint(SeveralIteration(1), tmp_dir)
        train_summary = TrainSummary(log_dir=tmp_dir,
                                     app_name="run1")
        train_summary.set_summary_trigger("LearningRate", SeveralIteration(1))
        val_summary = ValidationSummary(log_dir=tmp_dir,
                                        app_name="run1")
        optimizer.set_train_summary(train_summary)
        optimizer.set_val_summary(val_summary)
        optimizer.set_end_when(MaxEpoch(epoch_num * 2))

        trained_model = optimizer.optimize()
        lr_result = train_summary.read_scalar("LearningRate")
        top1_result = val_summary.read_scalar("Top1Accuracy")

        # TODO: add result validation
        parameters = trained_model.parameters()

        assert parameters["linear1"] is not None
        print("parameters %s" % parameters["linear1"])
        predict_result = trained_model.predict(trainingData)
        p = predict_result.take(2)
        print("predict predict: \n")
        for i in p:
            print(str(i) + "\n")
        print(len(p))

        test_results = trained_model.evaluate(trainingData, 32, [Top1Accuracy()])
        for test_result in test_results:
            print(test_result)