def test_tf_optimizer_with_sparse_gradient_using_keras(self):
        import tensorflow as tf

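        # Synthetic data: 40 scalar ids in [0, 10) paired with labels in [0, 5).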
        ids = np.random.randint(0, 10, size=[40])
        labels = np.random.randint(0, 5, size=[40])
        id_rdd = self.sc.parallelize(ids)
        label_rdd = self.sc.parallelize(labels)
        training_rdd = id_rdd.zip(label_rdd).map(lambda x: [x[0], x[1]])

        dataset = TFDataset.from_rdd(training_rdd,
                                     names=["ids", "labels"],
                                     shapes=[[], []],
                                     types=[tf.int32, tf.int32],
                                     batch_size=8)
        from tensorflow.python.ops import variable_scope

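        # Force use_resource=False so Keras creates legacy (non-resource)
        # variables for its weights while the model is built inside this scope.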
        def variable_creator(**kwargs):
            kwargs["use_resource"] = False
            return variable_scope.default_variable_creator(None, **kwargs)

        getter = lambda next_creator, **kwargs: variable_creator(**kwargs)
        with variable_scope.variable_creator_scope(getter):
            words_input = tf.keras.layers.Input(shape=(), name='words_input')
            embedding_layer = tf.keras.layers.Embedding(input_dim=10,
                                                        output_dim=5,
                                                        name='word_embedding')
            word_embeddings = embedding_layer(words_input)
            embedding = tf.keras.layers.Flatten()(word_embeddings)
            output = tf.keras.layers.Dense(5, activation="softmax")(embedding)
            model = tf.keras.models.Model(inputs=[words_input],
                                          outputs=[output])
            model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy")\

        optimizer = TFOptimizer.from_keras(model, dataset)
        optimizer.optimize()
Example no. 2
    def test_tf_optimizer_with_sparse_gradient_using_keras(self):
        import tensorflow as tf

        ids = np.random.randint(0, 10, size=[40])
        labels = np.random.randint(0, 5, size=[40])
        id_rdd = self.sc.parallelize(ids)
        label_rdd = self.sc.parallelize(labels)
        training_rdd = id_rdd.zip(label_rdd).map(lambda x: [x[0], x[1]])

        dataset = TFDataset.from_rdd(training_rdd,
                                     names=["ids", "labels"],
                                     shapes=[[], []],
                                     types=[tf.int32, tf.int32],
                                     batch_size=8)
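        # The Embedding lookup produces sparse (IndexedSlices) gradients,
        # which is what this test exercises through TFOptimizer.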
        words_input = tf.keras.layers.Input(shape=(), name='words_input')
        embedding_layer = tf.keras.layers.Embedding(input_dim=10,
                                                    output_dim=5,
                                                    name='word_embedding')
        word_embeddings = embedding_layer(words_input)
        embedding = tf.keras.layers.Flatten()(word_embeddings)
        output = tf.keras.layers.Dense(5, activation="softmax")(embedding)
        model = tf.keras.models.Model(inputs=[words_input], outputs=[output])
        model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy")

        optimizer = TFOptimizer.from_keras(model, dataset)
        optimizer.optimize()
Example no. 3
    def _fit_distributed(self, dataset, validation_split, epochs, **kwargs):
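        # Build the distributed optimizer once; later calls only refresh its
        # weights and continue training from the epochs completed so far.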
        if not self.tf_optimizer:
            self.tf_optimizer = TFOptimizer.from_keras(
                self.model, dataset, val_split=validation_split, **kwargs)
        else:
            self.tf_optimizer.refresh_weights()

        end_epoch = self.tf_optimizer_done_epochs + epochs
        self.tf_optimizer.optimize(MaxEpoch(end_epoch))
        self.tf_optimizer_done_epochs = end_epoch
Example no. 4
    def _fit_distributed(self, dataset, validation_split, epochs, **kwargs):
        self.tf_optimizer = TFOptimizer.from_keras(self.model,
                                                   dataset,
                                                   val_split=validation_split,
                                                   **kwargs)

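        # Forward any configured summaries so training/validation metrics are logged.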
        if self.train_summary is not None:
            self.tf_optimizer.set_train_summary(self.train_summary)

        if self.val_summary is not None:
            self.tf_optimizer.set_val_summary(self.val_summary)

        self.tf_optimizer.optimize(MaxEpoch(epochs))
Example no. 5
# Assumed imports for this standalone example (Analytics Zoo / BigDL; exact
# module paths may differ across versions):
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, Flatten, Input
from zoo import init_nncontext
from zoo.tfpark import TFDataset, TFOptimizer
from bigdl.dataset import mnist
from bigdl.dataset.transformer import normalizer
from bigdl.optim.optimizer import MaxEpoch, TrainSummary, ValidationSummary


def main(max_epoch, data_num):
    sc = init_nncontext()

    # get data, pre-process and create TFDataset
    def get_data_rdd(dataset):
        (images_data, labels_data) = mnist.read_data_sets("/tmp/mnist", dataset)
        image_rdd = sc.parallelize(images_data[:data_num])
        labels_rdd = sc.parallelize(labels_data[:data_num])
        rdd = image_rdd.zip(labels_rdd) \
            .map(lambda rec_tuple: [normalizer(rec_tuple[0], mnist.TRAIN_MEAN, mnist.TRAIN_STD),
                                    np.array(rec_tuple[1])])
        return rdd

    training_rdd = get_data_rdd("train")
    testing_rdd = get_data_rdd("test")
    dataset = TFDataset.from_rdd(training_rdd,
                                 names=["features", "labels"],
                                 shapes=[[28, 28, 1], []],
                                 types=[tf.float32, tf.int32],
                                 batch_size=280,
                                 val_rdd=testing_rdd
                                 )

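    # A small multi-layer perceptron over the flattened 28x28x1 images.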
    data = Input(shape=[28, 28, 1])

    x = Flatten()(data)
    x = Dense(64, activation='relu')(x)
    x = Dense(64, activation='relu')(x)
    predictions = Dense(10, activation='softmax')(x)

    model = Model(inputs=data, outputs=predictions)

    model.compile(optimizer='rmsprop',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    optimizer = TFOptimizer.from_keras(model, dataset)

    optimizer.set_train_summary(TrainSummary("/tmp/az_lenet", "lenet"))
    optimizer.set_val_summary(ValidationSummary("/tmp/az_lenet", "lenet"))
    # kick off training
    optimizer.optimize(end_trigger=MaxEpoch(max_epoch))

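    # Save the trained weights from TFOptimizer's underlying TF session.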
    saver = tf.train.Saver()
    saver.save(optimizer.sess, "/tmp/lenet/model")
Example no. 6
    def _fit_distributed(self, dataset, validation_split, epochs, **kwargs):
        self.tf_optimizer = TFOptimizer.from_keras(self.model,
                                                   dataset,
                                                   val_split=validation_split,
                                                   **kwargs)
        self.tf_optimizer.optimize(MaxEpoch(epochs))
Example no. 7
        output_dim=FC_LINEAR_DIMENSION,  # size: 32 -> 64.
        activation="sigmoid"))

# BigDL does not support parameter sharing, so this workaround is used instead.
both_feature = TimeDistributed(layer=convolve_net,
                               input_shape=input_shape)(both_input)

encode_left = both_feature.index_select(1, 0)
encode_right = both_feature.index_select(1, 1)

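# Element-wise L1 distance between the two encodings feeds the classifier.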
distance = autograd.abs(encode_left - encode_right)
predict = Dense(output_dim=NUM_CLASS_LABEL, activation="sigmoid")(distance)

siamese_net = Model(input=both_input, output=predict)
siamese_net.compile(optimizer="adam",
                    loss='sparse_categorical_crossentropy',
                    metrics=["accuracy"])

# Construct the distributed dataset object.
data_set = TFDataset.from_rdd(train_rdd,
                              shapes=[input_shape, [1]],
                              batch_size=args.batch_size,
                              val_rdd=test_rdd)

optimizer = TFOptimizer.from_keras(siamese_net, data_set)
app_name = "Siamese Network"

optimizer.set_train_summary(TrainSummary("tmp", app_name))
optimizer.set_val_summary(ValidationSummary("tmp", app_name))
optimizer.optimize(end_trigger=MaxEpoch(args.num_epoch))