Example #1
def top_level_task():
    """Train a 784-512-512-10 MLP on MNIST with FlexFlow Keras and verify
    that training reaches the reference MNIST-MLP accuracy."""
    num_classes = 10

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Flatten images to (num_samples, 784). Derive the sample count from
    # the data instead of hard-coding 60000 so the task also works when a
    # subset of MNIST is loaded.
    x_train = x_train.reshape(x_train.shape[0], 784)
    x_train = x_train.astype('float32')
    x_train /= 255  # scale pixel values to [0, 1]
    # Sparse categorical crossentropy expects integer class-id labels,
    # shaped (num_samples, 1).
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))
    print("shape: ", x_train.shape)

    input_tensor = Input(shape=(784, ))

    # Two ReLU hidden layers, then a linear projection to the class
    # logits with softmax applied as a separate layer.
    output = Dense(512, input_shape=(784, ), activation="relu")(input_tensor)
    output2 = Dense(512, activation="relu")(output)
    output3 = Dense(num_classes)(output2)
    output4 = Activation("softmax")(output3)

    model = Model(input_tensor, output4)

    opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    model.compile(
        optimizer=opt,
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy',
                 metrics.SparseCategoricalCrossentropy()])

    # The callbacks compare the achieved accuracy (final and per-epoch)
    # against the MNIST_MLP reference value.
    model.fit(x_train,
              y_train,
              epochs=10,
              callbacks=[
                  VerifyMetrics(ModelAccuracy.MNIST_MLP),
                  EpochVerifyMetrics(ModelAccuracy.MNIST_MLP)
              ])
Example #2
    def compile(self,
                optimizer,
                loss=None,
                metrics=None,
                loss_weights=None,
                weighted_metrics=None,
                run_eagerly=None,
                comp_mode=ff.CompMode.TRAINING,
                **kwargs):
        """Configure the model for training on the FlexFlow runtime.

        Resolves ``loss``/``metrics`` (instances or supported string
        names) to keras loss/metric objects, builds the underlying
        ``FFModel``, attaches the optimizer, and compiles it.

        Args:
            optimizer: flexflow.keras optimizer; its ``ffhandle`` is
                installed on the FFModel.
            loss: A ``keras_losses.Loss`` instance or one of
                'categorical_crossentropy',
                'sparse_categorical_crossentropy', 'mean_squared_error'.
            metrics: List of ``keras_metrics.Metric`` instances and/or
                supported metric-name strings.
            loss_weights, weighted_metrics, run_eagerly: Unsupported;
                any non-None value aborts.
            comp_mode: FlexFlow compilation mode (defaults to TRAINING).

        Raises:
            AssertionError: for unsupported arguments, losses, or metrics.
        """
        # Accepted only for tf.keras API compatibility; not supported here.
        if loss_weights is not None:
            assert 0, "loss_weights is not supported"
        if weighted_metrics is not None:
            assert 0, "weighted_metrics is not supported"
        if run_eagerly is not None:
            assert 0, "run_eagerly is not supported"

        assert loss is not None, "loss is None"
        if isinstance(loss, keras_losses.Loss):
            self._loss = loss
        elif loss == 'categorical_crossentropy':
            self._loss = keras_losses.CategoricalCrossentropy()
        elif loss == 'sparse_categorical_crossentropy':
            self._loss = keras_losses.SparseCategoricalCrossentropy()
            # Sparse labels are integer class ids, not one-hot vectors.
            self._label_type = ff.DataType.DT_INT32
        elif loss == 'mean_squared_error':
            self._loss = keras_losses.MeanSquaredError()
        else:
            assert 0, 'Unsupported loss'

        assert metrics is not None, "metrics is None"
        assert isinstance(metrics, list), 'Metrics should be a list'
        # Dispatch table: metric-name string -> metric class.
        metric_classes = {
            'accuracy': keras_metrics.Accuracy,
            'categorical_crossentropy': keras_metrics.CategoricalCrossentropy,
            'sparse_categorical_crossentropy':
                keras_metrics.SparseCategoricalCrossentropy,
            'mean_squared_error': keras_metrics.MeanSquaredError,
            'root_mean_squared_error': keras_metrics.RootMeanSquaredError,
            'mean_absolute_error': keras_metrics.MeanAbsoluteError,
        }
        for metric in metrics:
            if isinstance(metric, keras_metrics.Metric):
                # Already-constructed metric instances pass through as-is.
                self._metrics.append(metric)
            elif metric in metric_classes:
                self._metrics.append(metric_classes[metric]())
            else:
                assert 0, 'Unsupported metric'

        # Build the FlexFlow model graph and validate its tensors.
        self._ffmodel = ff.FFModel(self._ffconfig)
        self._create_input_tensors()
        self._create_flexflow_layers()

        self._verify_output_tensors()
        self._verify_input_tensors()

        self._ffoptimizer = optimizer
        self._create_optimizer()
        metrics_type = [metric.type for metric in self._metrics]
        self._ffmodel.optimizer = optimizer.ffhandle
        self._ffmodel.compile(loss_type=self._loss.type,
                              metrics=metrics_type,
                              comp_mode=comp_mode)
        self._create_label_tensor()
        fflogger.debug("%s, %s, %s, %s" %
                       (str(self._input_tensors[0]), str(self._output_tensor),
                        str(self._input_tensors[0].ffhandle),
                        str(self._output_tensor.ffhandle)))
Example #3
    def compile(self,
                optimizer,
                loss=None,
                metrics=None,
                loss_weights=None,
                weighted_metrics=None,
                run_eagerly=None,
                comp_mode=None,
                **kwargs):
        """Translate a tf.keras-style compile() call into FlexFlow terms.

        Maps the loss name, metric names, and tf.keras optimizer (or
        optimizer-name string) to their FlexFlow counterparts, builds the
        underlying ``FFModel``, and compiles it.

        Args:
            optimizer: A ``tf_keras_optimizer.Optimizer`` (SGD or Adam,
                hyperparameters are copied over) or one of the strings
                'SGD' / 'Adam' (FlexFlow defaults are used).
            loss: One of 'categorical_crossentropy',
                'sparse_categorical_crossentropy', 'mean_squared_error'.
            metrics: List of supported metric-name strings.
            loss_weights, weighted_metrics, run_eagerly: Unsupported;
                any non-None value aborts.
            comp_mode: FlexFlow compilation mode forwarded to FFModel.

        Raises:
            AssertionError: for unsupported arguments, losses, metrics,
                or optimizers.
        """
        # Accepted only for tf.keras API compatibility; not supported here.
        if loss_weights is not None:
            assert 0, "loss_weights is not supported"
        if weighted_metrics is not None:
            assert 0, "weighted_metrics is not supported"
        if run_eagerly is not None:
            assert 0, "run_eagerly is not supported"

        assert loss is not None, "loss is None"
        if loss == 'categorical_crossentropy':
            self._loss = ff_keras_losses.CategoricalCrossentropy()
        elif loss == 'sparse_categorical_crossentropy':
            self._loss = ff_keras_losses.SparseCategoricalCrossentropy()
            # Sparse labels are integer class ids, not one-hot vectors.
            self._label_type = ff.DataType.DT_INT32
        elif loss == 'mean_squared_error':
            self._loss = ff_keras_losses.MeanSquaredError()
        else:
            assert 0, 'Unsupported loss'

        assert metrics is not None, "metrics is None"
        assert isinstance(metrics, list), 'Metrics should be a list'
        # Dispatch table: metric-name string -> FlexFlow metric class.
        metric_classes = {
            'accuracy': ff_keras_metrics.Accuracy,
            'categorical_crossentropy':
                ff_keras_metrics.CategoricalCrossentropy,
            'sparse_categorical_crossentropy':
                ff_keras_metrics.SparseCategoricalCrossentropy,
            'mean_squared_error': ff_keras_metrics.MeanSquaredError,
            'root_mean_squared_error': ff_keras_metrics.RootMeanSquaredError,
            'mean_absolute_error': ff_keras_metrics.MeanAbsoluteError,
        }
        for metric in metrics:
            assert metric in metric_classes, 'Unsupported metric'
            self._metrics.append(metric_classes[metric]())

        self._ffmodel = ff.FFModel(self._ffconfig)
        self._create_input_tensors()
        self._create_flexflow_layers()

        # Debug dump of the created FlexFlow layers.
        layers = self._ffmodel.get_layers()
        for l in layers:
            print(l, layers[l])

        if isinstance(optimizer, tf_keras_optimizer.Optimizer):
            # Copy the hyperparameters out of the tf.keras optimizer into
            # the matching FlexFlow optimizer.
            if isinstance(optimizer, tf_keras_optimizer.SGD):
                self._ffoptimizer = ff_keras_optimizer.SGD(
                    learning_rate=optimizer.learning_rate.numpy(),
                    momentum=optimizer.momentum.numpy(),
                    nesterov=optimizer.nesterov)
            elif isinstance(optimizer, tf_keras_optimizer.Adam):
                self._ffoptimizer = ff_keras_optimizer.Adam(
                    learning_rate=optimizer.learning_rate.numpy(),
                    beta_1=optimizer.beta_1.numpy(),
                    beta_2=optimizer.beta_2.numpy(),
                    epsilon=optimizer.epsilon.numpy())
            else:
                assert 0, "Unsupported optimizer"
        elif isinstance(optimizer, str):
            # Optimizer-name strings get FlexFlow defaults.
            if optimizer == 'SGD':
                self._ffoptimizer = ff_keras_optimizer.SGD()
            elif optimizer == 'Adam':
                self._ffoptimizer = ff_keras_optimizer.Adam()
            else:
                assert 0, "Unsupported optimizer"
        else:
            assert 0, "Unsupported optimizer"

        self._create_optimizer()
        metrics_type = [metric.type for metric in self._metrics]
        self._ffmodel.compile(optimizer=self._ffoptimizer.ffhandle,
                              loss_type=self._loss.type,
                              metrics=metrics_type,
                              comp_mode=comp_mode)
        self._create_label_tensor()