Example #1
    def test_merge_method_seq_concat(self):
        bx1 = BLayer.Input(shape=(10, ))
        bx1_1 = BLayer.Input(shape=(10, ))
        bx2 = BLayer.Input(shape=(10, ))
        by1 = BLayer.Dense(12, activation="sigmoid")(bx1)
        bbranch1_node = BModel(bx1, by1)(bx1_1)
        bbranch2 = BSequential()
        bbranch2.add(BLayer.Dense(12, input_dim=10))
        bbranch2_node = bbranch2(bx2)
        bz = BLayer.merge([bbranch1_node, bbranch2_node], mode="concat")
        bmodel = BModel([bx1_1, bx2], bz)

        kx1 = KLayer.Input(shape=(10, ))
        kx2 = KLayer.Input(shape=(10, ))
        ky1 = KLayer.Dense(12, activation="sigmoid")(kx1)
        kbranch1_node = KModel(kx1, ky1)(kx1)
        kbranch2 = KSequential()
        kbranch2.add(KLayer.Dense(12, input_dim=10))
        kbranch2_node = kbranch2(kx2)
        kz = KLayer.merge([kbranch1_node, kbranch2_node], mode="concat")
        kmodel = KModel([kx1, kx2], kz)

        input_data = [np.random.random([2, 10]), np.random.random([2, 10])]
        self.compare_newapi(kmodel, bmodel, input_data,
                            self.convert_two_dense_model)
Example #2
    def test_merge_method_seq_concat(self):
        bx1 = BLayer.Input(shape=(10, ))
        bx2 = BLayer.Input(shape=(10, ))
        by1 = BLayer.Dense(12, activation="sigmoid")(bx1)
        bbranch1_node = BModel(bx1, by1)(bx1)
        bbranch2 = BSequential()
        bbranch2.add(BLayer.Dense(12, input_dim=10))
        bbranch2_node = bbranch2(bx2)
        bz = BLayer.merge([bbranch1_node, bbranch2_node], mode="concat")
        bmodel = BModel([bx1, bx2], bz)

        kx1 = KLayer.Input(shape=(10, ))
        kx2 = KLayer.Input(shape=(10, ))
        ky1 = KLayer.Dense(12, activation="sigmoid")(kx1)
        kbranch1_node = KModel(kx1, ky1)(kx1)
        kbranch2 = KSequential()
        kbranch2.add(KLayer.Dense(12, input_dim=10))
        kbranch2_node = kbranch2(kx2)
        kz = KLayer.merge([kbranch1_node, kbranch2_node], mode="concat")
        kmodel = KModel([kx1, kx2], kz)

        input_data = [np.random.random([2, 10]), np.random.random([2, 10])]
        self.compare_newapi(kmodel, bmodel, input_data, self.convert_two_dense_model)
Example #3
    def test_load_keras_seq_of(self):
        from bigdl.nn.keras.topology import Sequential as KSequential
        from bigdl.nn.keras.layer import Dense

        model = KSequential()
        fc1 = Dense(2, input_shape=[2, 3])
        model.add(fc1)
        tmp_path = tempfile.mktemp()
        model.save(tmp_path, True)
        model_loaded = KSequential.load(tmp_path)
        assert "bigdl.nn.keras.topology.Sequential" in str(type(model_loaded))
        assert len(model_loaded.layers) == 1
Example #4
 def compare_newapi(self, klayer, blayer, input_data, weight_converter=None,
                    is_training=False, rtol=1e-6, atol=1e-6):
     from keras.models import Sequential as KSequential
     from bigdl.nn.keras.topology import Sequential as BSequential
     bmodel = BSequential()
     bmodel.add(blayer)
     kmodel = KSequential()
     kmodel.add(klayer)
     koutput = kmodel.predict(input_data)
     from bigdl.nn.keras.layer import BatchNormalization
     if isinstance(blayer, BatchNormalization):
         k_running_mean = K.eval(klayer.running_mean)
         k_running_std = K.eval(klayer.running_std)
         blayer.set_running_mean(k_running_mean)
         blayer.set_running_std(k_running_std)
     if kmodel.get_weights():
         bmodel.set_weights(weight_converter(klayer, kmodel.get_weights()))
     bmodel.training(is_training)
     boutput = bmodel.forward(input_data)
     self.assert_allclose(boutput, koutput, rtol=rtol, atol=atol)
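
The weight_converter argument reorders the Keras weights into the layout BigDL expects before they are copied into the BigDL model. As a rough sketch only (this is a hypothetical helper, not the test suite's actual convert_two_dense_model), a converter for Dense layers mainly transposes each kernel, because Keras 1.x stores a Dense kernel as (input_dim, output_dim) while BigDL's Linear expects (output_dim, input_dim):

import numpy as np

def convert_dense_weights(klayer, kweights):
    # Hypothetical converter: kweights is the flat list from kmodel.get_weights(),
    # i.e. [kernel, bias] pairs for each Dense layer in Keras order.
    converted = []
    for i in range(0, len(kweights), 2):
        kernel, bias = kweights[i], kweights[i + 1]
        converted.append(np.transpose(kernel))  # (in, out) -> (out, in) for BigDL Linear
        converted.append(bias)                  # bias shape is unchanged
    return converted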
Example #5
    def test_train_dataset(self):
        images = []
        labels = []
        for i in range(0, 8):
            features = np.random.uniform(0, 1, (200, 200, 3))
            label = np.array([2])
            images.append(features)
            labels.append(label)
        image_frame = DistributedImageFrame(self.sc.parallelize(images),
                                            self.sc.parallelize(labels))

        transformer = Pipeline([
            BytesToMat(),
            Resize(256, 256),
            CenterCrop(224, 224),
            ChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
            MatToTensor(),
            ImageFrameToSample(target_keys=['label'])
        ])
        data_set = DataSet.image_frame(image_frame).transform(transformer)

        model = BSequential()
        model.add(BLayer.Convolution2D(1, 5, 5, input_shape=(3, 224, 224)))
        model.add(BLayer.Reshape((1 * 220 * 220, )))
        model.add(BLayer.Dense(20, activation="softmax"))
        model.compile(optimizer="sgd",
                      loss="sparse_categorical_crossentropy",
                      metrics=["accuracy"])
        model.fit(data_set, batch_size=8, nb_epoch=2, validation_data=data_set)
Example #6
 def test_train(self):
     x = np.random.random([32, 10])
     y = np.random.random([32, ])
     model = BSequential()
     model.add(BLayer.Dense(5, input_shape=(10, )))
     model.compile(optimizer="sgd", loss="mse", metrics=["accuracy"])
     model.fit(x, y, batch_size=8, nb_epoch=2, validation_data=(x, y))
     model.evaluate(x, y, batch_size=8)
     model.predict(x)
Example #7
    def test_train_dataset(self):
        images = []
        labels = []
        for i in range(0, 8):
            features = np.random.uniform(0, 1, (200, 200, 3))
            label = np.array([2])
            images.append(features)
            labels.append(label)
        image_frame = DistributedImageFrame(self.sc.parallelize(images),
                                            self.sc.parallelize(labels))

        transformer = Pipeline([BytesToMat(), Resize(256, 256), CenterCrop(224, 224),
                                ChannelNormalize(0.485, 0.456, 0.406, 0.229, 0.224, 0.225),
                                MatToTensor(), ImageFrameToSample(target_keys=['label'])])
        data_set = DataSet.image_frame(image_frame).transform(transformer)

        model = BSequential()
        model.add(BLayer.Convolution2D(1, 5, 5, input_shape=(3, 224, 224)))
        model.add(BLayer.Reshape((1*220*220, )))
        model.add(BLayer.Dense(20, activation="softmax"))
        model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
        model.fit(data_set, batch_size=8, nb_epoch=2, validation_data=data_set)
Example #8
 def test_train(self):
     x = np.random.random([32, 10])
     y = np.random.random([32, ])
     model = BSequential()
     model.add(BLayer.Dense(5, input_shape=(10, )))
     model.compile(optimizer="sgd", loss="mse", metrics=["accuracy"])
     model.fit(x, y, batch_size=8, nb_epoch=2, validation_data=(x, y))
     model.evaluate(x, y, batch_size=8)
     model.predict(x)
Example #9
def build_model(class_num):
    model = Sequential()
    model.add(Reshape((1, 28, 28), input_shape=(28, 28, 1)))
    model.add(Convolution2D(32, 3, 3, activation="relu"))
    model.add(Convolution2D(32, 3, 3, activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation="relu"))
    model.add(Dropout(0.5))
    model.add(Dense(class_num, activation="softmax"))
    return model
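
A typical call site for this helper builds the network for the ten MNIST digit classes and compiles it; the compile settings below mirror the other MNIST examples on this page rather than anything shown in this snippet:

model = build_model(10)
model.compile(optimizer="sgd",
              loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])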
Example #10
mnist_path = "datasets/mnist"
(X_train, Y_train), (X_test, Y_test) = mnist.load_data(mnist_path)
X_train = X_train[0:20000]
Y_train = Y_train[0:20000]

X_test = X_test[0:2000]
Y_test = Y_test[0:2000]

print(X_train.shape)
print(X_test.shape)
print(Y_train.shape)
print(Y_test.shape)

num_fc = 512
num_outputs = 10
model = Sequential()
model.add(Reshape((1, 28, 28), input_shape=(28, 28, 1)))
model.add(Convolution2D(20, 3, 3, activation="relu", input_shape=(1, 28, 28)))
model.add(MaxPooling2D())
model.add(Convolution2D(50, 3, 3, activation="relu", name="conv2_5x5"))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(num_fc, activation="relu", name="fc1"))
model.add(Dense(num_outputs, activation="softmax", name="fc2"))

print(model.get_input_shape())
print(model.get_output_shape())

model.compile(loss='sparse_categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
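
The script stops after compile; training would continue with the same fit signature used in the smaller examples above. A minimal continuation, assuming X_train and X_test keep the (N, 28, 28, 1) shape the Reshape layer expects (the batch size and epoch count are illustrative, not taken from the original script):

model.fit(X_train, Y_train, batch_size=128, nb_epoch=2,
          validation_data=(X_test, Y_test))
model.evaluate(X_test, Y_test, batch_size=128)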
Example #11
def build_model(class_num):
    model = Sequential()
    model.add(Reshape((1, 28, 28), input_shape=(28, 28, 1)))
    model.add(Convolution2D(6, 5, 5, activation="tanh", name="conv1_5x5"))
    model.add(MaxPooling2D())
    model.add(Convolution2D(12, 5, 5, activation="tanh", name="conv2_5x5"))
    model.add(MaxPooling2D())
    model.add(Flatten())
    model.add(Dense(100, activation="tanh", name="fc1"))
    model.add(Dense(class_num, activation="softmax", name="fc2"))
    return model
Example #12
def build_model(class_num):
    model = Sequential()
    model.add(Reshape((1, 28, 28), input_shape=(28, 28, 1)))
    model.add(Convolution2D(6, 5, 5, activation="tanh", name="conv1_5x5"))
    model.add(MaxPooling2D())
    model.add(Convolution2D(12, 5, 5, activation="tanh", name="conv2_5x5"))
    model.add(MaxPooling2D())
    model.add(Flatten())
    model.add(Dense(100, activation="tanh", name="fc1"))
    model.add(Dense(class_num, activation="softmax", name="fc2"))
    return model
# The lines building rdd_train_sample (used by the Optimizer below) are cut off in this
# excerpt; it is assembled from the training rows the same way rdd_test_sample is below.

pdf_sml = pdf.iloc[26:50, :]
sdf = sqlContext.createDataFrame(pdf_sml)
rdd_test_images = sdf.drop('image_id').rdd
rdd_test_labels = sc.parallelize(
    pd.read_csv("/home/matt/Bengalia/train.csv")["grapheme_root"].iloc[26:50])
rdd_test_sample = rdd_test_images.zip(rdd_test_labels).map(
    lambda features_label: common.Sample.from_ndarray(
        np.asarray([x / 255
                    for x in features_label[0]]), features_label[1] + 1))

from bigdl.nn.keras.topology import Sequential
from bigdl.nn.keras.layer import *

lenet_model = Sequential()
lenet_model.add(Reshape((1, 137, 236), input_shape=([32332])))
lenet_model.add(Convolution2D(3, 5, 5, activation="tanh", name="conv1_5x5"))
lenet_model.add(MaxPooling2D())
lenet_model.add(Flatten())
lenet_model.add(Dense(200, activation="tanh", name="fc1"))
lenet_model.add(Dense(168, activation="softmax", name="fc2"))
lenet_model.get_input_shape()
lenet_model.get_output_shape()

# Create an Optimizer

optimizer = Optimizer(model=lenet_model,
                      training_rdd=rdd_train_sample,
                      criterion=ClassNLLCriterion(),
                      optim_method=SGD(learningrate=0.4,