Example #1
 def compare_layer(self,
                   klayer,
                   zlayer,
                   input_data,
                   weight_converter=None,
                   is_training=False,
                   rtol=1e-6,
                   atol=1e-6):
     """
     Compare the forward results of a Keras layer against the corresponding Zoo Keras API layer.
     """
     from keras.models import Sequential as KSequential
     from zoo.pipeline.api.keras.models import Sequential as ZSequential
     zmodel = ZSequential()
     zmodel.add(zlayer)
     kmodel = KSequential()
     kmodel.add(klayer)
     koutput = kmodel.predict(input_data)
     from zoo.pipeline.api.keras.layers import BatchNormalization
     if isinstance(zlayer, BatchNormalization):
         k_running_mean = K.eval(klayer.running_mean)
         k_running_std = K.eval(klayer.running_std)
         zlayer.set_running_mean(k_running_mean)
         zlayer.set_running_std(k_running_std)
     if kmodel.get_weights():
         zmodel.set_weights(weight_converter(klayer, kmodel.get_weights()))
     zmodel.training(is_training)
     zoutput = zmodel.forward(input_data)
     self.assert_allclose(zoutput, koutput, rtol=rtol, atol=atol)
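A hedged usage sketch (inside the same test class): comparing a Keras Dense layer against its Zoo Keras counterpart. The `convert_dense` helper is hypothetical and stands in for whatever weight converter the real test suite defines.

import numpy as np
from keras.layers import Dense as KDense
from zoo.pipeline.api.keras.layers import Dense as ZDense

input_data = np.random.random([4, 8])
klayer = KDense(6, input_dim=8)
zlayer = ZDense(6, input_dim=8)
# convert_dense is a hypothetical helper that reorders Keras weights into Zoo's layout.
self.compare_layer(klayer, zlayer, input_data, weight_converter=self.convert_dense)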
Example #2
    def compare_unary_op(self, kk_func, z_layer, shape, rtol=1e-5, atol=1e-5):
        x = klayers.Input(shape=shape[1:])

        batch = shape[0]

        kkresult = kk_func(x)
        x_value = np.random.uniform(0, 1, shape)

        k_grads = KK.get_session().run(KK.gradients(kkresult, x),
                                       feed_dict={x: x_value})
        k_output = KK.get_session().run(kkresult, feed_dict={x: x_value})
        model = Sequential()
        model.add(InputLayer(shape[1:]))
        model.add(z_layer)
        z_output = model.forward(x_value)
        grad_output = np.array(z_output)
        grad_output.fill(1.0)
        z_grad = model.backward(x_value, grad_output)

        z_output2 = model.forward(x_value)
        z_grad2 = model.backward(x_value, grad_output)
        self.assert_allclose(z_output, z_output2, rtol, atol)
        self.assert_allclose(z_grad, z_grad2, rtol, atol)

        self.assert_allclose(z_output, k_output, rtol, atol)
        self.assert_allclose(z_grad, k_grads[0], rtol, atol)
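A hedged call sketch, assuming `KK` aliases `keras.backend` as in the surrounding test module and that Zoo's `Activation` layer is importable as shown.

from zoo.pipeline.api.keras.layers import Activation

# Compare element-wise ReLU from the Keras backend against Zoo's Activation layer.
self.compare_unary_op(lambda t: KK.relu(t), Activation("relu"), shape=(4, 3))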
Example #3
    def init_with_default_embedding(cls, vocab=40990, seq_len=77, n_block=12, resid_drop=0.1,
                                    attn_drop=0.1, n_head=12, hidden_size=768,
                                    embedding_drop=0.1, mask_attention=True):
        """
        vocab: vocabulary size of the training data, default is 40990
        seq_len: max sequence length of the training data, default is 77
        n_block: number of transformer blocks, default is 12
        resid_drop: drop probability of the projection, default is 0.1
        attn_drop: drop probability of the attention, default is 0.1
        n_head: number of attention heads, default is 12
        hidden_size: hidden size, which is also the embedding size, default is 768
        embedding_drop: drop probability of the embedding layer, default is 0.1
        mask_attention: whether the attention is unidirectional (masked) or bidirectional,
                        default is True (unidirectional)
        """
        from bigdl.nn.layer import Squeeze
        embedding = Sequential()

        embedding.add(Reshape([seq_len * 2], input_shape=(seq_len, 2)))\
            .add(Embedding(vocab, hidden_size, input_length=seq_len * 2))\
            .add(Dropout(embedding_drop))\
            .add(Reshape((seq_len, 2, hidden_size)))\
            .add(KerasLayerWrapper(Sum(dimension=3, squeeze=True)))
        # workaround for bug #1208; remove this line once the bug is fixed
        embedding.add(KerasLayerWrapper(Squeeze(dim=3)))

        return TransformerLayer(n_block, resid_drop, attn_drop, n_head, mask_attention,
                                embedding, input_shape=(seq_len, 2))
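A hedged construction sketch for the initializer above; only the call signature and the declared `input_shape=(seq_len, 2)` come from the code, the rest is illustrative.

# Hypothetical usage: build the transformer stack with the default embedding.
transformer = TransformerLayer.init_with_default_embedding(vocab=40990, seq_len=77, n_block=12)

# Each sample is a (seq_len, 2) array of (token index, position index) pairs,
# so the layer can sit at the front of a Sequential model.
model = Sequential()
model.add(transformer)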
Example #4
    def test_square_as_first_layer(self):
        def z_func(x):
            return square(x)

        ll = Lambda(function=z_func, input_shape=[2, 3])
        seq = Sequential()
        seq.add(ll)
        result = seq.forward(np.ones([2, 3]))
        assert (result == np.ones([2, 3])).all()
Example #5
 def test_save_load_Sequential(self):
     zmodel = ZSequential()
     dense = ZLayer.Dense(10, input_dim=5)
     zmodel.add(dense)
     tmp_path = create_tmp_path()
     zmodel.saveModel(tmp_path, None, True)
     model_reloaded = Net.load(tmp_path)
     input_data = np.random.random([10, 5])
     y = np.random.random([10, 10])
     model_reloaded.compile(optimizer="adam", loss="mse")
     model_reloaded.fit(x=input_data, y=y, batch_size=8, nb_epoch=1)
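A hedged follow-up check for the same test: after fitting, the reloaded model should still run a plain forward pass (the (10, 10) shape simply reflects Dense(10) applied to a batch of 10).

# Hypothetical sanity check on the reloaded model.
preds = model_reloaded.forward(input_data)
assert preds.shape == (10, 10)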
Example #6
 def test_regularizer(self):
     model = ZSequential()
     model.add(
         ZLayer.Dense(16,
                      W_regularizer=regularizers.l2(0.001),
                      activation='relu',
                      input_shape=(10000, )))
     model.summary()
     model.compile(optimizer='rmsprop',
                   loss='binary_crossentropy',
                   metrics=['acc'])
Example #7
    def init(cls,
             vocab=40990,
             seq_len=77,
             n_block=12,
             hidden_drop=0.1,
             attn_drop=0.1,
             n_head=12,
             hidden_size=768,
             embedding_drop=0.1,
             initializer_range=0.02,
             bidirectional=False,
             output_all_block=False):
        """
        vocab: vocabulary size of the training data, default is 40990
        seq_len: max sequence length of the training data, default is 77
        n_block: number of transformer blocks, default is 12
        hidden_drop: drop probability of the projection, default is 0.1
        attn_drop: drop probability of the attention, default is 0.1
        n_head: number of attention heads, default is 12
        hidden_size: hidden size, which is also the embedding size, default is 768
        embedding_drop: drop probability of the embedding layer, default is 0.1
        initializer_range: weight initialization range, default is 0.02
        bidirectional: whether the model is bidirectional, default is False (unidirectional)
        output_all_block: whether to output every block's output, default is False
        """
        if hidden_size <= 0:
            raise TypeError(
                'hidden_size must be greater than 0 with the default embedding layer'
            )
        from bigdl.nn.layer import Squeeze
        word_input = InputLayer(input_shape=(seq_len, ))
        position_input = InputLayer(input_shape=(seq_len, ))

        embedding = Sequential()
        embedding.add(Merge(layers=[word_input, position_input], mode='concat'))\
            .add(Reshape([seq_len * 2]))\
            .add(Embedding(vocab, hidden_size, input_length=seq_len * 2,
                           weights=np.random.normal(0.0, initializer_range, (vocab, hidden_size))))\
            .add(Dropout(embedding_drop))\
            .add(Reshape((seq_len, 2, hidden_size)))\
            .add(KerasLayerWrapper(Sum(dimension=3, squeeze=True)))
        # workaround for bug #1208; remove this line once the bug is fixed
        embedding.add(KerasLayerWrapper(Squeeze(dim=3)))

        shape = ((seq_len, ), (seq_len, ))
        return TransformerLayer(n_block,
                                hidden_drop,
                                attn_drop,
                                n_head,
                                initializer_range,
                                bidirectional,
                                output_all_block,
                                embedding,
                                input_shape=shape)
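A hedged construction sketch for `init`; only the signature and the declared `input_shape=((seq_len,), (seq_len,))` come from the code above.

# Hypothetical usage: a bidirectional configuration of the layer above.
transformer = TransformerLayer.init(vocab=40990, seq_len=77, n_block=12,
                                    bidirectional=True, output_all_block=False)
# Two inputs are expected, each of shape (batch, seq_len): word indices and position indices.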
Example #8
 def _build_model(sequence_length):
     model = Sequential()
     model.add(Embedding(20, 10, input_length=sequence_length))
     model.add(Convolution1D(4, 3))
     model.add(Flatten())
     model.add(Dense(5, activation="softmax"))
     return model
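A hedged training sketch for `_build_model` on random data. It assumes plain Keras 2 conventions (`epochs`, `sparse_categorical_crossentropy`); a Zoo Keras model would take `nb_epoch` instead of `epochs`.

import numpy as np

# Hypothetical smoke test: random integer sequences of length 30, 5 output classes.
sequence_length = 30
model = _build_model(sequence_length)
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
x = np.random.randint(0, 20, size=(64, sequence_length))
y = np.random.randint(0, 5, size=(64,))
model.fit(x, y, batch_size=16, epochs=2)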
Example #9
 def build_model(self):
     model = Sequential()
     model.add(Dense(24, input_dim=self.state_size, activation='relu'))
     model.add(Dense(24, activation='relu'))
     model.add(Dense(self.action_size, activation='linear'))
     model.summary()
     model.compile(loss='mse', optimizer=Adam(lr=self.learning_rate))
     return model
Example #10
    def build_model(self):
        model = Sequential()
        model.add(InputLayer(input_shape=self.feature_shape)) \
            .add(LSTM(input_shape=self.feature_shape, output_dim=self.hidden_layers[0],
                      return_sequences=True))

        for ilayer in range(1, len(self.hidden_layers) - 1):
            model.add(LSTM(output_dim=self.hidden_layers[ilayer], return_sequences=True)) \
                .add(Dropout(self.dropouts[ilayer]))

        model.add(LSTM(self.hidden_layers[-1], return_sequences=False)) \
            .add(Dropout(self.dropouts[-1]))

        model.add(Dense(output_dim=1))
        return model
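The layer widths come from instance attributes; a hedged sketch of the configuration this builder appears to expect (attribute names are taken from the code above, the values are only illustrative and would live on the owning class).

# Hypothetical configuration consumed by build_model above.
self.feature_shape = (50, 8)       # 50 time steps, 8 features per step
self.hidden_layers = [64, 32, 16]  # first LSTM, looped middle LSTMs, final LSTM
self.dropouts = [0.2, 0.2, 0.2]    # one dropout rate per hidden layer
model = self.build_model()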
Example #11
    def test_merge_method_seq_concat(self):
        zx1 = ZLayer.Input(shape=(10, ))
        zx2 = ZLayer.Input(shape=(10, ))
        zy1 = ZLayer.Dense(12, activation="sigmoid")(zx1)
        zbranch1_node = ZModel(zx1, zy1)(zx1)
        zbranch2 = ZSequential()
        zbranch2.add(ZLayer.Dense(12, input_dim=10))
        zbranch2_node = zbranch2(zx2)
        zz = ZLayer.merge([zbranch1_node, zbranch2_node], mode="concat")
        zmodel = ZModel([zx1, zx2], zz)

        kx1 = KLayer.Input(shape=(10, ))
        kx2 = KLayer.Input(shape=(10, ))
        ky1 = KLayer.Dense(12, activation="sigmoid")(kx1)
        kbranch1_node = KModel(kx1, ky1)(kx1)
        kbranch2 = KSequential()
        kbranch2.add(KLayer.Dense(12, input_dim=10))
        kbranch2_node = kbranch2(kx2)
        kz = KLayer.merge([kbranch1_node, kbranch2_node], mode="concat")
        kmodel = KModel([kx1, kx2], kz)

        input_data = [np.random.random([2, 10]), np.random.random([2, 10])]
        self.compare_layer(kmodel, zmodel, input_data, self.convert_two_dense)
Example #12
def predict(model_path, image_path, top_n):
    sc = init_nncontext(
        "Image classification inference example using int8 quantized model")
    images = ImageSet.read(image_path, sc, image_codec=1)
    model = ImageClassifier.load_model(model_path)
    output = model.predict_image_set(images)
    label_map = model.get_config().label_map()

    # Collect predictions as a list of (uri, prediction array) tuples.
    predicts = output.get_predict().collect()

    sequential = Sequential()
    sequential.add(Activation("softmax", input_shape=predicts[0][1][0].shape))
    for pre in predicts:
        (uri, probs) = pre
        out = sequential.forward(probs[0])
        sortedProbs = [(prob, index) for index, prob in enumerate(out)]
        sortedProbs.sort(reverse=True)  # descending by probability, avoids hard-coding 1000 classes
        print("Image : %s, top %d prediction result" % (uri, top_n))
        for i in range(top_n):
            print(
                "\t%s, %f" %
                (label_map[sortedProbs[i][1]], sortedProbs[i][0]))
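A hedged sketch of a script entry point around `predict`; the argument names and defaults are placeholders, only the function signature comes from the code above.

# Hypothetical CLI wrapper; adjust option names to the real example script.
if __name__ == "__main__":
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--model", dest="model_path", help="path to the trained/quantized model")
    parser.add_argument("--image", dest="image_path", help="path or folder of input images")
    parser.add_argument("--topN", dest="top_n", type=int, default=5)
    args = parser.parse_args()
    predict(args.model_path, args.image_path, args.top_n)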
Example #13
    def test_merge_method_seq_concat(self):
        zx1 = ZLayer.Input(shape=(10, ))
        zx2 = ZLayer.Input(shape=(10, ))
        zy1 = ZLayer.Dense(12, activation="sigmoid")(zx1)
        zbranch1_node = ZModel(zx1, zy1)(zx1)
        zbranch2 = ZSequential()
        zbranch2.add(ZLayer.Dense(12, input_dim=10))
        zbranch2_node = zbranch2(zx2)
        zz = ZLayer.merge([zbranch1_node, zbranch2_node], mode="concat")
        zmodel = ZModel([zx1, zx2], zz)

        kx1 = KLayer.Input(shape=(10, ))
        kx2 = KLayer.Input(shape=(10, ))
        ky1 = KLayer.Dense(12, activation="sigmoid")(kx1)
        kbranch1_node = KModel(kx1, ky1)(kx1)
        kbranch2 = KSequential()
        kbranch2.add(KLayer.Dense(12, input_dim=10))
        kbranch2_node = kbranch2(kx2)
        kz = KLayer.merge([kbranch1_node, kbranch2_node], mode="concat")
        kmodel = KModel([kx1, kx2], kz)

        input_data = [np.random.random([2, 10]), np.random.random([2, 10])]
        self.compare_layer(kmodel, zmodel, input_data, self.convert_two_dense)
Example #14
 def compare_layer(self, klayer, zlayer, input_data, weight_converter=None,
                   is_training=False, rtol=1e-6, atol=1e-6):
     """
     Compare the forward results of a Keras layer against the corresponding Zoo Keras API layer.
     """
     from keras.models import Sequential as KSequential
     from zoo.pipeline.api.keras.models import Sequential as ZSequential
     zmodel = ZSequential()
     zmodel.add(zlayer)
     kmodel = KSequential()
     kmodel.add(klayer)
     koutput = kmodel.predict(input_data)
     from zoo.pipeline.api.keras.layers import BatchNormalization
     if isinstance(zlayer, BatchNormalization):
         k_running_mean = K.eval(klayer.running_mean)
         k_running_std = K.eval(klayer.running_std)
         zlayer.set_running_mean(k_running_mean)
         zlayer.set_running_std(k_running_std)
     if kmodel.get_weights():
         zmodel.set_weights(weight_converter(klayer, kmodel.get_weights()))
     zmodel.training(is_training)
     zoutput = zmodel.forward(input_data)
     self.assert_allclose(zoutput, koutput, rtol=rtol, atol=atol)
Example #15
 def build_model(self):
     model = Sequential()
     model.add(
         InputLayer(input_shape=(self.sequence_length, self.token_length)))
     if self.encoder.lower() == 'cnn':
         model.add(
             Convolution1D(self.encoder_output_dim, 5, activation='relu'))
         model.add(GlobalMaxPooling1D())
     elif self.encoder.lower() == 'lstm':
         model.add(LSTM(self.encoder_output_dim))
     elif self.encoder.lower() == 'gru':
         model.add(GRU(self.encoder_output_dim))
     else:
         raise ValueError('Unsupported encoder: ' + self.encoder)
     model.add(Dense(128))
     model.add(Dropout(0.2))
     model.add(Activation('relu'))
     model.add(Dense(self.class_num, activation='softmax'))
     return model
Example #16
def buildmodel():
    print("Now we build the model")
    model = Sequential()
    model.add(
        Convolution2D(32,
                      8,
                      8,
                      subsample=(4, 4),
                      border_mode='same',
                      input_shape=(img_rows, img_cols,
                                   img_channels)))  # 80*80*4
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 4, 4, subsample=(2, 2), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same'))
    model.add(Activation('relu'))
    model.add(Flatten())
    model.add(Dense(512))
    model.add(Activation('relu'))
    model.add(Dense(2))

    model.compile(loss='mse', optimizer='adam')
    print("We finish building the model")
    return model
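`Convolution2D(..., subsample=..., border_mode=...)` is the Keras 1 signature; a hedged Keras 2 equivalent of the first convolution, should the surrounding project run a newer Keras:

# Keras 2 spelling of the first layer above: strides replaces subsample, padding replaces border_mode.
from keras.layers import Conv2D

model.add(Conv2D(32, (8, 8), strides=(4, 4), padding='same',
                 input_shape=(img_rows, img_cols, img_channels)))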
Example #17
 def _build_mode(self):
     print("Now we build the model")
     model = Sequential()
     model.add(Convolution2D(32, 8, 8, subsample=(4, 4), border_mode='same',
                             input_shape=(IMAGE_ROWS, IMAGE_COLS, IMAGE_CHANNELS)))  # 80*80*4
     model.add(Activation('relu'))
     model.add(Convolution2D(64, 4, 4, subsample=(2, 2), border_mode='same'))
     model.add(Activation('relu'))
     model.add(Convolution2D(64, 3, 3, subsample=(1, 1), border_mode='same'))
     model.add(Activation('relu'))
     model.add(Flatten())
     model.add(Dense(512))
     model.add(Activation('relu'))
     model.add(Dense(2))
     
     # the 1 x 2 output represents each action's probability
     model.add(Activation('softmax'))
     return model
Example #18
x_train = unroll(x_train,unroll_length)
x_test  = unroll(x_test,unroll_length)
y_train = y_train[-x_train.shape[0]:]
y_test  = y_test[-x_test.shape[0]:]

# see the shape
print("x_train", x_train.shape)
print("y_train", y_train.shape)
print("x_test", x_test.shape)
print("y_test", y_test.shape)

# Build the model
model = Sequential()

model.add(LSTM(
    input_shape=(x_train.shape[1], x_train.shape[-1]),
    output_dim=20,
    return_sequences=True))
model.add(Dropout(0.2))

model.add(LSTM(
    10,
    return_sequences=False))
model.add(Dropout(0.2))

model.add(Dense(
    output_dim=1))

model.compile(loss='mse', optimizer='rmsprop')

%%time
# Train the model
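The cell stops before the training call itself; a hedged sketch of the step that typically follows (batch size, epoch count, and validation split are placeholders; Keras 1 would spell `epochs` as `nb_epoch`).

# Hypothetical training call for the LSTM model built above.
model.fit(x_train, y_train, batch_size=128, epochs=5, validation_split=0.1)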
Example #19
df = pd.read_csv("../resources/datasets/dataset-1_converted.csv")

trainDf, testDf = train_test_split(df, test_size=0.2)
print("Created Train and Test Df\n")

predictionColumn = 'slotOccupancy'

x = trainDf.drop(columns=[predictionColumn])
inputs = len(x.columns)

y = trainDf[[predictionColumn]]
outputs = len(y.columns)

model = Sequential()
model.add(Dense(output_dim=inputs, activation="relu", input_shape=(inputs, )))
model.add(Dense(output_dim=inputs, activation="relu"))
model.add(Dense(output_dim=outputs))

model.compile(optimizer="adam", loss="mean_squared_error")

model.summary()
print("Created Sequential Model!\n")

xNumpy = x.to_numpy()
yNumpy = y.to_numpy()
# model.fit(x=xNumpy, y=yNumpy, nb_epoch=1, distributed=False)

import tensorflow as tf

weights = np.array(model.get_weights(), dtype=object)
Example #20
 def build_model(self):
     model = Sequential()
     model.add(InputLayer(input_shape=(self.sequence_length, self.token_length)))
     if self.encoder.lower() == 'cnn':
         model.add(Convolution1D(self.encoder_output_dim, 5, activation='relu'))
         model.add(GlobalMaxPooling1D())
     elif self.encoder.lower() == 'lstm':
         model.add(LSTM(self.encoder_output_dim))
     elif self.encoder.lower() == 'gru':
         model.add(GRU(self.encoder_output_dim))
     else:
         raise ValueError('Unsupported encoder: ' + self.encoder)
     model.add(Dense(128))
     model.add(Dropout(0.2))
     model.add(Activation('relu'))
     model.add(Dense(self.class_num, activation='softmax'))
     return model
Example #21
train_rdd = sc.parallelize(t_train_img).zip(sc.parallelize(train_lbl)).map(
    lambda rec: Sample.from_ndarray(rec[0], rec[1] + 1))  # if training with keras.fit, the label needs -1 instead
test_rdd = sc.parallelize(t_test_img).zip(sc.parallelize(test_lbl)).map(
    lambda rec: Sample.from_ndarray(rec[0], rec[1] + 1))

# Define the model's network structure with Zoo-Keras.
input_shape = (NUM_CLASS_LABEL, NUM_IMAGE_CHANNEL, IMAGE_SIZE, IMAGE_SIZE)
both_input = Input(shape=input_shape)

convolve_net = Sequential()
convolve_net.add(
    Convolution2D(
        nb_filter=LAYER_1_NUM_CHANNEL,  # channels: 4 -> 8.
        nb_row=CONVOLVE_1_KERNEL_SIZE,  # size: 32 - 9 + 1 = 24
        nb_col=CONVOLVE_1_KERNEL_SIZE,
        activation="relu",
        input_shape=(NUM_IMAGE_CHANNEL, IMAGE_SIZE, IMAGE_SIZE),
        W_regularizer=L2Regularizer(args.penalty_rate)))
convolve_net.add(
    AveragePooling2D(
        pool_size=(
            POOLING_1_WINDOW_SIZE,  # size: 24 / 2 = 12.
            POOLING_1_WINDOW_SIZE),
        strides=(POOLING_1_STRIDE_SIZE, POOLING_1_STRIDE_SIZE)))
convolve_net.add(BatchNormalization())
convolve_net.add(
    Convolution2D(
        nb_filter=LAYER_2_NUM_CHANNEL,  # channels: 8 -> 2.
        nb_row=CONVOLVE_2_KERNEL_SIZE,  # size: 12 - 5 + 1 = 8.
        nb_col=CONVOLVE_2_KERNEL_SIZE,
Example #22
 def build_model(self):
     model = Sequential()
     model.add(self.embedding)
     if self.encoder.lower() == 'cnn':
         model.add(Convolution1D(self.encoder_output_dim, 5, activation='relu'))
         model.add(GlobalMaxPooling1D())
     elif self.encoder.lower() == 'lstm':
         model.add(LSTM(self.encoder_output_dim))
     elif self.encoder.lower() == 'gru':
         model.add(GRU(self.encoder_output_dim))
     else:
         raise ValueError('Unsupported encoder for TextClassifier: ' + self.encoder)
     model.add(Dense(128))
     model.add(Dropout(0.2))
     model.add(Activation('relu'))
     model.add(Dense(self.class_num, activation='softmax'))
     return model