Example No. 1
    def test_train_dataset(self):
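        # Build 8 random 200x200x3 arrays, each paired with a label of 2.
        # BLayer / BSequential below are assumed to be BigDL's Keras-style API
        # (bigdl.nn.keras), matching the imports shown in Example No. 4.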
        images = []
        labels = []
        for i in range(0, 8):
            features = np.random.uniform(0, 1, (200, 200, 3))
            label = np.array([2])
            images.append(features)
            labels.append(label)
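        # Distribute the images and labels as an ImageFrame over Spark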
        image_frame = DistributedImageFrame(self.sc.parallelize(images),
                                            self.sc.parallelize(labels))

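        # Preprocessing pipeline: convert to OpenCV Mat, resize to 256x256,
        # center-crop to 224x224, normalize each channel, and emit
        # (image, label) Samples for training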
        transformer = Pipeline([
            BytesToMat(),
            Resize(256, 256),
            CenterCrop(224, 224),
            ChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
            MatToTensor(),
            ImageFrameToSample(target_keys=['label'])
        ])
        data_set = DataSet.image_frame(image_frame).transform(transformer)

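        # Small convolutional model defined with the Keras-style API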
        model = BSequential()
        model.add(BLayer.Convolution2D(1, 5, 5, input_shape=(3, 224, 224)))
        model.add(BLayer.Reshape((1 * 220 * 220, )))
        model.add(BLayer.Dense(20, activation="softmax"))
        model.compile(optimizer="sgd",
                      loss="sparse_categorical_crossentropy",
                      metrics=["accuracy"])
        model.fit(data_set, batch_size=8, nb_epoch=2, validation_data=data_set)
Example No. 2
    def test_merge_method_seq_concat(self):
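        # BigDL side: a functional Model branch and a Sequential branch,
        # merged by concatenation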
        bx1 = BLayer.Input(shape=(10, ))
        bx1_1 = BLayer.Input(shape=(10, ))
        bx2 = BLayer.Input(shape=(10, ))
        by1 = BLayer.Dense(12, activation="sigmoid")(bx1)
        bbranch1_node = BModel(bx1, by1)(bx1_1)
        bbranch2 = BSequential()
        bbranch2.add(BLayer.Dense(12, input_dim=10))
        bbranch2_node = bbranch2(bx2)
        bz = BLayer.merge([bbranch1_node, bbranch2_node], mode="concat")
        bmodel = BModel([bx1_1, bx2], bz)

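        # Keras side: the equivalent graph used as the reference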
        kx1 = KLayer.Input(shape=(10, ))
        kx2 = KLayer.Input(shape=(10, ))
        ky1 = KLayer.Dense(12, activation="sigmoid")(kx1)
        kbranch1_node = KModel(kx1, ky1)(kx1)
        kbranch2 = KSequential()
        kbranch2.add(KLayer.Dense(12, input_dim=10))
        kbranch2_node = kbranch2(kx2)
        kz = KLayer.merge([kbranch1_node, kbranch2_node], mode="concat")
        kmodel = KModel([kx1, kx2], kz)

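        # Run both models on the same random batch and compare their outputs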
        input_data = [np.random.random([2, 10]), np.random.random([2, 10])]
        self.compare_newapi(kmodel, bmodel, input_data,
                            self.convert_two_dense_model)
Example No. 3
    def test_train(self):
        # 32 random samples of dimension 10 with random scalar targets
        x = np.random.random([32, 10])
        y = np.random.random([32, ])
        model = BSequential()
        model.add(BLayer.Dense(5, input_shape=(10, )))
        model.compile(optimizer="sgd", loss="mse", metrics=["accuracy"])
        # Train for 2 epochs, then evaluate and predict on the same data
        model.fit(x, y, batch_size=8, nb_epoch=2, validation_data=(x, y))
        model.evaluate(x, y, batch_size=8)
        model.predict(x)
Example No. 4
    def compare_newapi(self, klayer, blayer, input_data, weight_converter=None,
                       is_training=False, rtol=1e-6, atol=1e-6):
        from keras import backend as K
        from keras.models import Sequential as KSequential
        from bigdl.nn.keras.topology import Sequential as BSequential
        # Wrap the single layer under test in a one-layer model on each side
        bmodel = BSequential()
        bmodel.add(blayer)
        kmodel = KSequential()
        kmodel.add(klayer)
        koutput = kmodel.predict(input_data)
        from bigdl.nn.keras.layer import BatchNormalization
        if isinstance(blayer, BatchNormalization):
            # Copy Keras' running statistics so inference-mode BatchNormalization
            # produces comparable results
            k_running_mean = K.eval(klayer.running_mean)
            k_running_std = K.eval(klayer.running_std)
            blayer.set_running_mean(k_running_mean)
            blayer.set_running_std(k_running_std)
        if kmodel.get_weights():
            # Convert the Keras weights into BigDL's layout before loading them
            bmodel.set_weights(weight_converter(klayer, kmodel.get_weights()))
        bmodel.training(is_training)
        boutput = bmodel.forward(input_data)
        # The two outputs must agree within the given tolerances
        self.assert_allclose(boutput, koutput, rtol=rtol, atol=atol)