Example #1
0
def mlp():
    """Train a 784-512-512-10 MLP on MNIST via the FlexFlow functional Keras API."""
    num_classes = 10

    # Only the training split is used below.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Flatten 28x28 images to 784-dim float32 vectors scaled to [0, 1].
    x_train = x_train.reshape(60000, 784).astype('float32')
    x_train /= 255
    # Labels become an (N, 1) int32 column for the sparse loss.
    y_train = np.reshape(y_train.astype('int32'), (len(y_train), 1))
    print("shape: ", x_train.shape)

    # Leading batch dim 0 — presumably a placeholder FlexFlow resolves at
    # compile time; TODO confirm against the FlexFlow Keras Input docs.
    inputs = Input(batch_shape=[0, 784], dtype="float32")

    hidden1 = Dense(512, input_shape=(784, ), activation="relu")(inputs)
    hidden2 = Dense(512, activation="relu")(hidden1)
    logits = Dense(num_classes)(hidden2)
    probs = Activation("softmax")(logits)

    model = Model(inputs, probs)

    sgd = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=sgd)

    model.fit(x_train, y_train, epochs=1)
Example #2
0
def mlp_net2net():
    """Net2Net-style transfer demo: train a teacher MLP on MNIST, read its
    trained Dense weights back, copy them into an identically shaped
    student model, then continue training the student.
    """
    num_classes = 10

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Flatten images to 784-dim float32 vectors scaled to [0, 1].
    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    # Labels as an (N, 1) int32 column.
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))
    #y_train = np.random.randint(1, 9, size=(len(y_train),1), dtype='int32')
    print("shape: ", x_train.shape)

    # Teacher network.

    # Leading batch dim 0 — presumably a placeholder FlexFlow resolves at
    # compile time; TODO confirm.
    input_tensor1 = Input(batch_shape=[0, 784], dtype="float32")

    # Layer objects are kept in variables so their trained weights can be
    # read back after fitting.
    d1 = Dense(512, input_shape=(784, ), activation="relu")
    d2 = Dense(512, activation="relu")
    d3 = Dense(num_classes)

    output = d1(input_tensor1)
    output = d2(output)
    output = d3(output)
    output = Activation("softmax")(output)

    teacher_model = Model(input_tensor1, output)

    opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    teacher_model.compile(optimizer=opt)

    teacher_model.fit(x_train, y_train, epochs=1)

    # Extract trained kernel/bias pairs from the compiled FlexFlow model.
    d1_kernel, d1_bias = d1.get_weights(teacher_model.ffmodel)
    d2_kernel, d2_bias = d2.get_weights(teacher_model.ffmodel)
    d3_kernel, d3_bias = d3.get_weights(teacher_model.ffmodel)

    # Student network: same topology, freshly constructed layers.

    input_tensor2 = Input(batch_shape=[0, 784], dtype="float32")

    sd1 = Dense(512, input_shape=(784, ), activation="relu")
    sd2 = Dense(512, activation="relu")
    sd3 = Dense(num_classes)

    output = sd1(input_tensor2)
    output = sd2(output)
    output = sd3(output)
    output = Activation("softmax")(output)

    student_model = Model(input_tensor2, output)

    opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    student_model.compile(optimizer=opt)

    # Seed the student with the teacher's trained parameters; done after
    # compile since set_weights needs the built student_model.ffmodel.
    sd1.set_weights(student_model.ffmodel, d1_kernel, d1_bias)
    sd2.set_weights(student_model.ffmodel, d2_kernel, d2_bias)
    sd3.set_weights(student_model.ffmodel, d3_kernel, d3_bias)

    student_model.fit(x_train, y_train, epochs=1)
Example #3
0
def top_level_task():
    """Build, compile, and fit a 784-512-512-10 MLP on MNIST, checking
    accuracy after training and per epoch via the verification callbacks.
    """
    num_classes = 10

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Preprocess: flat float32 vectors in [0, 1]; labels as (N, 1) int32.
    x_train = x_train.reshape(60000, 784).astype('float32')
    x_train /= 255
    y_train = np.reshape(y_train.astype('int32'), (len(y_train), 1))
    print("shape: ", x_train.shape)

    inputs = Input(shape=(784, ))

    x = Dense(512, input_shape=(784, ), activation="relu")(inputs)
    x = Dense(512, activation="relu")(x)
    x = Dense(num_classes)(x)
    probs = Activation("softmax")(x)

    model = Model(inputs, probs)

    sgd = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    model.compile(
        optimizer=sgd,
        loss='sparse_categorical_crossentropy',
        metrics=['accuracy',
                 metrics.SparseCategoricalCrossentropy()])

    checks = [
        VerifyMetrics(ModelAccuracy.MNIST_MLP),
        EpochVerifyMetrics(ModelAccuracy.MNIST_MLP),
    ]
    model.fit(x_train, y_train, epochs=10, callbacks=checks)
Example #4
0
def top_level_task():
    """Sequential-API MLP on MNIST: 784 -> 512 -> 512 -> 10 softmax, one epoch."""
    num_classes = 10

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Flatten and scale inputs; labels become an (N, 1) int32 column.
    x_train = x_train.reshape(60000, 784).astype('float32')
    x_train /= 255
    y_train = np.reshape(y_train.astype('int32'), (len(y_train), 1))
    print("shape: ", x_train.shape)

    model = Sequential([
        Dense(512, input_shape=(784, ), activation="relu"),
        Dense(512, activation="relu"),
        Dense(num_classes),
        Activation("softmax"),
    ])

    print(model.summary())

    sgd = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=sgd)

    model.fit(x_train, y_train, epochs=1)
Example #5
0
def top_level_task():
    """CNN feature extractor plus MLP classifier on MNIST, composed as two
    sub-models nested inside an outer Sequential model.
    """

    num_classes = 10

    img_rows, img_cols = 28, 28

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # channels-first image layout: (N, 1, H, W)
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)

    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))
    print("shape: ", x_train.shape, x_train.__array_interface__["strides"])

    # Sub-model 1: convolutional feature extractor ending in Flatten.
    layers = [
        Conv2D(filters=32,
               input_shape=(1, 28, 28),
               kernel_size=(3, 3),
               strides=(1, 1),
               padding=(1, 1),
               activation="relu"),
        Conv2D(filters=64,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding=(1, 1),
               activation="relu"),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"),
        Flatten()
    ]
    model1 = Sequential(layers)

    # Sub-model 2: dense classifier. 12544 = 64 filters * 14 * 14 after
    # 2x2 pooling — must match model1's flattened output size.
    input_tensor = Input(shape=(12544, ), dtype="float32")

    output = Dense(512, input_shape=(12544, ), activation="relu")(input_tensor)
    output = Dense(num_classes)(output)
    output = Activation("softmax")(output)

    model2 = Model(input_tensor, output)

    # Outer model chains the two sub-models end to end.
    model = Sequential()
    model.add(model1)
    model.add(model2)

    print(model.summary())

    opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy', 'sparse_categorical_crossentropy'])

    print(model.summary())

    model.fit(x_train,
              y_train,
              epochs=5,
              callbacks=[
                  VerifyMetrics(ModelAccuracy.MNIST_CNN),
                  EpochVerifyMetrics(ModelAccuracy.MNIST_CNN)
              ])
Example #6
0
def top_level_task():
    """MLP on MNIST using the dict-keyed Model(inputs=...) form.

    Fix: the original wrote ``Input(shape=(784))`` — ``(784)`` is just the
    int 784, not a 1-tuple; every other example in this file spells the
    shape ``(784, )``, so use the real tuple here too.
    """

    backend.set_image_data_format('channels_first')

    num_classes = 10

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Flatten to 784-dim float32 vectors in [0, 1]; labels -> (N, 1) int32.
    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))
    print("shape: ", x_train.shape)

    # (784, ) is a genuine 1-tuple; (784) would be a plain int.
    input_tensor = Input(shape=(784, ))
    output = Dense(512, activation="relu")(input_tensor)
    output = Dense(512, activation="relu")(output)
    output = Dense(num_classes)(output)
    output = Activation("softmax")(output)
    # Inputs given as an {index: tensor} dict — a form this API accepts
    # alongside the plain tensor/list forms.
    model = Model(inputs={1: input_tensor}, outputs=output)

    print(model.summary())

    opt = optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy', 'sparse_categorical_crossentropy'])
    model.fit(x_train, y_train, batch_size=64, epochs=1)
def top_level_task():
  """Train and evaluate a dropout-regularized MLP on MNIST; the first
  Dense layer uses explicit GlorotUniform/Zeros initializers."""
  num_classes = 10

  (x_train, y_train), (x_test, y_test) = mnist.load_data()

  # Flatten, scale to [0, 1], and shape labels as an (N, 1) int32 column.
  x_train = x_train.reshape(60000, 784).astype('float32')
  x_train /= 255
  y_train = np.reshape(y_train.astype('int32'), (len(y_train), 1))
  print("shape: ", x_train.shape)

  model = Sequential()
  first = Dense(512, input_shape=(784,), kernel_initializer=GlorotUniform(123), bias_initializer=Zeros())
  model.add(first)
  model.add(Activation('relu'))
  model.add(Dropout(0.2))
  model.add(Dense(512, activation="relu"))
  model.add(Dropout(0.2))
  model.add(Dense(num_classes))
  model.add(Activation("softmax"))

  sgd = flexflow.keras.optimizers.SGD(learning_rate=0.01)
  model.compile(optimizer=sgd, loss='sparse_categorical_crossentropy', metrics=['accuracy', 'sparse_categorical_crossentropy'])

  print(model.summary())

  verify = [VerifyMetrics(ModelAccuracy.MNIST_MLP), EpochVerifyMetrics(ModelAccuracy.MNIST_MLP)]
  model.fit(x_train, y_train, epochs=20, callbacks=verify)
  model.evaluate(x=x_train, y=y_train)
Example #8
0
def top_level_task():
    """Functional-API CNN on MNIST; after compiling, inspects the Flatten
    layer's input/output tensors via model.get_layer, then trains with
    accuracy-verification callbacks.
    """
    num_classes = 10

    img_rows, img_cols = 28, 28

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # channels-first layout: (N, 1, 28, 28)
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)

    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))

    input_tensor = Input(shape=(1, 28, 28), dtype="float32")

    output = Conv2D(filters=32,
                    input_shape=(1, 28, 28),
                    kernel_size=(3, 3),
                    strides=(1, 1),
                    padding=(1, 1),
                    activation="relu")(input_tensor)
    #  output = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu")(output)
    # Second conv uses explicit seeded initializers and a separate
    # Activation layer instead of the fused activation= argument.
    output = Conv2D(filters=64,
                    kernel_size=(3, 3),
                    strides=(1, 1),
                    padding=(1, 1),
                    kernel_initializer=GlorotUniform(123),
                    bias_initializer=Zeros())(output)
    output = Activation('relu')(output)
    output = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                          padding="valid")(output)
    output = Dropout(0.25)(output)
    output = Flatten()(output)
    output = Dense(128, activation="relu")(output)
    output = Dense(num_classes)(output)
    output = Activation("softmax")(output)

    model = Model(input_tensor, output)

    opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy', 'sparse_categorical_crossentropy'])

    print(model.summary())

    # 'flat' is presumably the auto-generated name of the Flatten layer —
    # TODO confirm against FlexFlow's layer-naming scheme.
    flatten1 = model.get_layer(name='flat')
    t1 = flatten1.output_tensors[0]
    t2 = flatten1.input_tensors[0]
    print("t1: ", t1.to_layers, " ", t1.from_layer)
    print("t2: ", t2.to_layers, " ", t2.from_layer)

    model.fit(x_train,
              y_train,
              epochs=5,
              callbacks=[
                  VerifyMetrics(ModelAccuracy.MNIST_CNN),
                  EpochVerifyMetrics(ModelAccuracy.MNIST_CNN)
              ])
def top_level_task():
    """MNIST MLP using the low-level FlexFlow FFModel API: build layers,
    compile, wrap the numpy arrays in data loaders, train, evaluate, and
    return the collected perf metrics.
    """
    ffconfig = FFConfig()
    print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %
          (ffconfig.batch_size, ffconfig.workers_per_node, ffconfig.num_nodes))
    ffmodel = FFModel(ffconfig)

    # Input tensor shaped (batch, 784).
    dims_input = [ffconfig.batch_size, 784]
    input_tensor = ffmodel.create_tensor(dims_input, DataType.DT_FLOAT)

    num_samples = 60000

    # Seeded uniform init on the first dense layer only.
    kernel_init = UniformInitializer(12, -1, 1)
    t = ffmodel.dense(input_tensor,
                      512,
                      ActiMode.AC_MODE_RELU,
                      kernel_initializer=kernel_init)
    t = ffmodel.dense(t, 512, ActiMode.AC_MODE_RELU)
    t = ffmodel.dense(t, 10)

    t = ffmodel.softmax(t)

    ffoptimizer = SGDOptimizer(ffmodel, 0.01)
    ffmodel.optimizer = ffoptimizer
    ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY,
                    metrics=[
                        MetricsType.METRICS_ACCURACY,
                        MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY
                    ])
    # Label tensor is read only after compile().
    label_tensor = ffmodel.label_tensor

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    print(x_train.shape)
    # Flatten to 784-dim float32 in [0, 1]; labels as (N, 1) int32.
    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))

    dataloader_input = ffmodel.create_data_loader(input_tensor, x_train)
    dataloader_label = ffmodel.create_data_loader(label_tensor, y_train)

    ffmodel.init_layers()

    epochs = ffconfig.epochs

    ts_start = ffconfig.get_current_time()

    ffmodel.fit(x=dataloader_input, y=dataloader_label, epochs=epochs)
    ffmodel.eval(x=dataloader_input, y=dataloader_label)

    ts_end = ffconfig.get_current_time()
    # 1e-6 scale: timestamps are presumably microseconds — TODO confirm.
    run_time = 1e-6 * (ts_end - ts_start)
    print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %
          (epochs, run_time, num_samples * epochs / run_time))

    perf_metrics = ffmodel.get_perf_metrics()

    return perf_metrics
Example #10
0
def top_level_task():
  """Load an ONNX MLP into a FlexFlow FFModel, train it on MNIST with
  SingleDataLoader, and assert the final accuracy meets the MNIST_MLP bar.
  """
  ffconfig = FFConfig()
  ffconfig.parse_args()
  print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %(ffconfig.get_batch_size(), ffconfig.get_workers_per_node(), ffconfig.get_num_nodes()))
  ffmodel = FFModel(ffconfig)

  dims1 = [ffconfig.get_batch_size(), 784]
  input1 = ffmodel.create_tensor(dims1, DataType.DT_FLOAT);

  num_samples = 60000

  # Build the compute graph from the ONNX file; "input.1" is the name of
  # the graph input inside the ONNX model.
  onnx_model = ONNXModel("mnist_mlp.onnx")
  t = onnx_model.apply(ffmodel, {"input.1": input1})

  ffoptimizer = SGDOptimizer(ffmodel, 0.01)
  ffmodel.set_sgd_optimizer(ffoptimizer)
  ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY, metrics=[MetricsType.METRICS_ACCURACY, MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY])
  # Label tensor is fetched after compile.
  label = ffmodel.get_label_tensor()

  (x_train, y_train), (x_test, y_test) = mnist.load_data()

  # Flatten to 784-dim float32 in [0, 1]; labels as (N, 1) int32.
  x_train = x_train.reshape(60000, 784)
  x_train = x_train.astype('float32')
  x_train /= 255
  y_train = y_train.astype('int32')
  y_train = np.reshape(y_train, (len(y_train), 1))

  # Stage the full dataset in FlexFlow tensors: attach the numpy arrays,
  # construct the data loaders, then detach the arrays again.
  dims_full_input = [num_samples, 784]
  full_input = ffmodel.create_tensor(dims_full_input, DataType.DT_FLOAT)

  dims_full_label = [num_samples, 1]
  full_label = ffmodel.create_tensor(dims_full_label, DataType.DT_INT32)

  full_input.attach_numpy_array(ffconfig, x_train)
  full_label.attach_numpy_array(ffconfig, y_train)

  dataloader_input = SingleDataLoader(ffmodel, input1, full_input, num_samples, DataType.DT_FLOAT)
  dataloader_label = SingleDataLoader(ffmodel, label, full_label, num_samples, DataType.DT_INT32)

  full_input.detach_numpy_array(ffconfig)
  full_label.detach_numpy_array(ffconfig)

  ffmodel.init_layers()

  epochs = ffconfig.get_epochs()

  ts_start = ffconfig.get_current_time()

  ffmodel.fit(x=dataloader_input, y=dataloader_label, epochs=epochs)

  ts_end = ffconfig.get_current_time()
  # 1e-6 scale: timestamps are presumably microseconds — TODO confirm.
  run_time = 1e-6 * (ts_end - ts_start);
  print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %(epochs, run_time, num_samples * epochs / run_time));

  perf_metrics = ffmodel.get_perf_metrics()
  accuracy = perf_metrics.get_accuracy()
  # Fail loudly if training did not reach the expected accuracy.
  if accuracy < ModelAccuracy.MNIST_MLP.value:
    assert 0, 'Check Accuracy'
Example #11
0
def top_level_task():
    """MNIST CNN via the low-level FlexFlow FFModel API: two conv layers,
    pooling, two dense layers, softmax; trains and returns perf metrics.
    """
    ffconfig = FFConfig()
    ffconfig.parse_args()
    print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %
          (ffconfig.get_batch_size(), ffconfig.get_workers_per_node(),
           ffconfig.get_num_nodes()))
    ffmodel = FFModel(ffconfig)

    # Channels-first input: (batch, 1, 28, 28).
    dims_input = [ffconfig.get_batch_size(), 1, 28, 28]
    input_tensor = ffmodel.create_tensor(dims_input, DataType.DT_FLOAT)

    num_samples = 60000

    # conv2d args: out_channels, kernel h/w, stride h/w, padding h/w.
    t = ffmodel.conv2d(input_tensor, 32, 3, 3, 1, 1, 1, 1,
                       ActiMode.AC_MODE_RELU, True)
    t = ffmodel.conv2d(t, 64, 3, 3, 1, 1, 1, 1, ActiMode.AC_MODE_RELU, True)
    t = ffmodel.pool2d(t, 2, 2, 2, 2, 0, 0)
    t = ffmodel.flat(t)
    t = ffmodel.dense(t, 128, ActiMode.AC_MODE_RELU)
    t = ffmodel.dense(t, 10)
    t = ffmodel.softmax(t)

    ffoptimizer = SGDOptimizer(ffmodel, 0.01)
    ffmodel.set_sgd_optimizer(ffoptimizer)
    ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY,
                    metrics=[
                        MetricsType.METRICS_ACCURACY,
                        MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY
                    ])
    # Label tensor is fetched after compile.
    label_tensor = ffmodel.get_label_tensor()

    img_rows, img_cols = 28, 28
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # channels-first layout, scaled to [0, 1]; labels as (N, 1) int32.
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))

    dataloader_input = ffmodel.create_data_loader(input_tensor, x_train)
    dataloader_label = ffmodel.create_data_loader(label_tensor, y_train)

    ffmodel.init_layers()

    epochs = ffconfig.get_epochs()

    ts_start = ffconfig.get_current_time()

    ffmodel.fit(x=dataloader_input, y=dataloader_label, epochs=epochs)

    ts_end = ffconfig.get_current_time()
    # 1e-6 scale: timestamps are presumably microseconds — TODO confirm.
    run_time = 1e-6 * (ts_end - ts_start)
    print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %
          (epochs, run_time, num_samples * epochs / run_time))

    perf_metrics = ffmodel.get_perf_metrics()

    return perf_metrics
def top_level_task():
  """Multi-branch MNIST classifier: four MLP sub-models applied to a shared
  784-dim input, concatenated with two extra raw inputs, then classified.
  Exercises nested models (model11 inside model1) and multi-input Model.
  """
  num_classes = 10

  (x_train, y_train), (x_test, y_test) = mnist.load_data()

  # Flatten to 784-dim float32 in [0, 1]; labels as (N, 1) int32.
  x_train = x_train.reshape(60000, 784)
  x_train = x_train.astype('float32')
  x_train /= 255
  y_train = y_train.astype('int32')
  y_train = np.reshape(y_train, (len(y_train), 1))
  print("shape: ", x_train.shape)

  # NOTE(review): all sub-model Inputs are declared with shape (256,) but
  # are ultimately called on a 784-dim tensor below — presumably the shape
  # is re-derived on call; TODO confirm.
  # input_tensor1 = Input(shape=(256,))
  input_tensor11 = Input(shape=(256,))
  input_tensor12 = Input(shape=(256,))
  input_tensor2 = Input(shape=(256,))
  input_tensor3 = Input(shape=(256,))
  input_tensor4 = Input(shape=(256,))

  # Branch 1 is built as a model-inside-a-model: model11 is applied to a
  # second input tensor and the result extended with another Dense layer.
  # t1 = Dense(512, activation="relu", name="dense1")(input_tensor1)
  # t1 = Dense(512, activation="relu", name="dense12")(t1)
  # model1 = Model(input_tensor1, t1)
  t11 = Dense(512, activation="relu", name="dense1")(input_tensor11)
  model11 = Model(input_tensor11, t11)
  t12 = model11(input_tensor12)
  t1 = Dense(512, activation="relu", name="dense12")(t12)
  model1 = Model(input_tensor12, t1)

  # Branches 2-4: plain two-layer MLP sub-models.
  t2 = Dense(512, activation="relu", name="dense2")(input_tensor2)
  t2 = Dense(512, activation="relu", name="dense22")(t2)
  model2 = Model(input_tensor2, t2)
  t3 = Dense(512, activation="relu", name="dense3")(input_tensor3)
  t3 = Dense(512, activation="relu", name="dense33")(t3)
  model3 = Model(input_tensor3, t3)
  t4 = Dense(512, activation="relu", name="dense4")(input_tensor4)
  t4 = Dense(512, activation="relu", name="dense44")(t4)
  model4 = Model(input_tensor4, t4)

  # Final graph: one shared 784-dim input feeds all four branches; two raw
  # 784-dim inputs (t00, t01) are concatenated alongside the branch outputs.
  input_tensor = Input(shape=(784,))
  t00 = Input(shape=(784,), name="input_00")
  t01 = Input(shape=(784,), name="input_01")
  t1 = model1(input_tensor)
  t2 = model2(input_tensor)
  t3 = model3(input_tensor)
  t4 = model4(input_tensor)
  output = Concatenate(axis=1)([t00, t01, t1, t2, t3, t4])
  output = Dense(num_classes)(output)
  output = Activation("softmax")(output)

  model = Model([t00, t01, input_tensor], output)

  opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
  model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy', 'sparse_categorical_crossentropy'])

  print(model.summary())

  # Three inputs -> the same x_train array is fed to each.
  model.fit([x_train, x_train, x_train], y_train, epochs=10, callbacks=[VerifyMetrics(ModelAccuracy.MNIST_MLP), EpochVerifyMetrics(ModelAccuracy.MNIST_MLP)])
Example #13
0
def top_level_task():
  """Load a serialized PyTorch model ("mlp.ff") into a FlexFlow FFModel,
  stage MNIST through SingleDataLoader, and train with the legacy
  ffmodel.train() entry point.
  """
  ffconfig = FFConfig()
  ffconfig.parse_args()
  print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %(ffconfig.get_batch_size(), ffconfig.get_workers_per_node(), ffconfig.get_num_nodes()))
  ffmodel = FFModel(ffconfig)

  dims = [ffconfig.get_batch_size(), 784]
  input_tensor = ffmodel.create_tensor(dims, DataType.DT_FLOAT);

  num_samples = 60000

  # "mlp.ff" is a model description exported from PyTorch.
  torch_model = PyTorchModel("mlp.ff")

  output_tensors = torch_model.apply(ffmodel, [input_tensor])

  ffoptimizer = SGDOptimizer(ffmodel, 0.01)
  ffmodel.set_sgd_optimizer(ffoptimizer)
  ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY, metrics=[MetricsType.METRICS_ACCURACY, MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY])
  # Label tensor is fetched after compile.
  label = ffmodel.get_label_tensor()

  (x_train, y_train), (x_test, y_test) = mnist.load_data()

  print(x_train.shape)
  # Flatten to 784-dim float32 in [0, 1]; labels as (N, 1) int32.
  x_train = x_train.reshape(60000, 784)
  x_train = x_train.astype('float32')
  x_train /= 255
  y_train = y_train.astype('int32')
  y_train = np.reshape(y_train, (len(y_train), 1))

  # Stage the full dataset: attach numpy arrays, build loaders, detach.
  dims_full_input = [num_samples, 784]
  full_input = ffmodel.create_tensor(dims_full_input, DataType.DT_FLOAT)

  dims_full_label = [num_samples, 1]
  full_label = ffmodel.create_tensor(dims_full_label, DataType.DT_INT32)

  full_input.attach_numpy_array(ffconfig, x_train)
  full_label.attach_numpy_array(ffconfig, y_train)

  dataloader_input = SingleDataLoader(ffmodel, input_tensor, full_input, num_samples, DataType.DT_FLOAT)
  dataloader_label = SingleDataLoader(ffmodel, label, full_label, num_samples, DataType.DT_INT32)

  full_input.detach_numpy_array(ffconfig)
  full_label.detach_numpy_array(ffconfig)

  ffmodel.init_layers()

  epochs = ffconfig.get_epochs()

  ts_start = ffconfig.get_current_time()

  # Older train() API (cf. fit() in the other examples).
  ffmodel.train((dataloader_input, dataloader_label), epochs)

  ts_end = ffconfig.get_current_time()
  # 1e-6 scale: timestamps are presumably microseconds — TODO confirm.
  run_time = 1e-6 * (ts_end - ts_start);
  print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %(epochs, run_time, num_samples * epochs / run_time));
def top_level_task(test_type=1):
    """Train an ONNX-imported MNIST MLP and assert its accuracy.

    test_type selects the source model: 1 loads the PyTorch-exported ONNX
    file (graph input "input.1"); anything else loads the Keras-exported
    ONNX file (graph input "input_1") via ONNXModelKeras.
    """
    ffconfig = FFConfig()
    print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %
          (ffconfig.batch_size, ffconfig.workers_per_node, ffconfig.num_nodes))
    ffmodel = FFModel(ffconfig)

    dims1 = [ffconfig.batch_size, 784]
    input1 = ffmodel.create_tensor(dims1, DataType.DT_FLOAT)

    num_samples = 60000

    if test_type == 1:
        onnx_model = ONNXModel("mnist_mlp_pt.onnx")
        t = onnx_model.apply(ffmodel, {"input.1": input1})
    else:
        onnx_model = ONNXModelKeras("mnist_mlp_keras.onnx", ffconfig, ffmodel)
        t = onnx_model.apply(ffmodel, {"input_1": input1})

    ffoptimizer = SGDOptimizer(ffmodel, 0.01)
    ffmodel.optimizer = ffoptimizer
    ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY,
                    metrics=[
                        MetricsType.METRICS_ACCURACY,
                        MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY
                    ])
    # Label tensor is read only after compile().
    label = ffmodel.label_tensor

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Flatten to 784-dim float32 in [0, 1]; labels as (N, 1) int32.
    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))

    dataloader_input = ffmodel.create_data_loader(input1, x_train)
    dataloader_label = ffmodel.create_data_loader(label, y_train)

    ffmodel.init_layers()

    epochs = ffconfig.epochs

    ts_start = ffconfig.get_current_time()

    ffmodel.fit(x=dataloader_input, y=dataloader_label, epochs=epochs)

    ts_end = ffconfig.get_current_time()
    # 1e-6 scale: timestamps are presumably microseconds — TODO confirm.
    run_time = 1e-6 * (ts_end - ts_start)
    print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %
          (epochs, run_time, num_samples * epochs / run_time))

    perf_metrics = ffmodel.get_perf_metrics()
    accuracy = perf_metrics.get_accuracy()
    # Fail loudly if training did not reach the expected accuracy.
    if accuracy < ModelAccuracy.MNIST_MLP.value:
        assert 0, 'Check Accuracy'
def top_level_task():
    """Four two-layer MLP sub-models applied pairwise to two shared 784-dim
    inputs, concatenated and classified; the final Model takes its inputs
    as an {index: tensor} dict.
    """
    backend.set_image_data_format('channels_first')
    num_classes = 10

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Flatten to 784-dim float32 in [0, 1]; labels as (N, 1) int32.
    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))
    print("shape: ", x_train.shape)

    input_tensor1 = Input(shape=(784, ))
    input_tensor2 = Input(shape=(784, ))
    input_tensor3 = Input(shape=(784, ))
    input_tensor4 = Input(shape=(784, ))

    # Four independent two-layer MLP sub-models.
    t1 = Dense(512, activation="relu", name="dense1")(input_tensor1)
    t1 = Dense(512, activation="relu", name="dense12")(t1)
    model1 = Model(input_tensor1, t1)
    t2 = Dense(512, activation="relu", name="dense2")(input_tensor2)
    t2 = Dense(512, activation="relu", name="dense22")(t2)
    model2 = Model(input_tensor2, t2)
    t3 = Dense(512, activation="relu", name="dense3")(input_tensor3)
    t3 = Dense(512, activation="relu", name="dense33")(t3)
    model3 = Model(input_tensor3, t3)
    t4 = Dense(512, activation="relu", name="dense4")(input_tensor4)
    t4 = Dense(512, activation="relu", name="dense44")(t4)
    model4 = Model(input_tensor4, t4)

    # Fresh inputs for the combined graph (names shadow the ones above);
    # models 1-2 share the first input, models 3-4 share the second.
    input_tensor1 = Input(shape=(784, ))
    input_tensor2 = Input(shape=(784, ))
    t1 = model1(input_tensor1)
    t2 = model2(input_tensor1)
    t3 = model3(input_tensor2)
    t4 = model4(input_tensor2)
    output = Concatenate(axis=1)([t1, t2, t3, t4])
    output = Dense(num_classes)(output)
    output = Activation("softmax")(output)

    # Inputs passed as an {index: tensor} dict form.
    model = Model({5: input_tensor1, 6: input_tensor2}, output)

    opt = optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy', 'sparse_categorical_crossentropy'])

    print(model.summary())

    # Same x_train array is fed to both inputs.
    model.fit([x_train, x_train], y_train, epochs=1)
Example #16
0
def top_level_task():
    """Train a small Sequential CNN (2x conv, maxpool, flatten, 2x dense,
    softmax) on channels-first MNIST images for one epoch."""
    num_classes = 10

    img_rows, img_cols = 28, 28

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # channels-first layout: (N, 1, H, W)
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)

    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = np.reshape(y_train.astype('int32'), (len(y_train), 1))
    print("shape: ", x_train.shape, x_train.__array_interface__["strides"])

    model = Sequential()
    model.add(Conv2D(filters=32,
                     input_shape=(1, 28, 28),
                     kernel_size=(3, 3),
                     strides=(1, 1),
                     padding=(1, 1),
                     activation="relu"))
    model.add(Conv2D(filters=64,
                     kernel_size=(3, 3),
                     strides=(1, 1),
                     padding=(1, 1),
                     activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"))
    model.add(Flatten())
    model.add(Dense(128, activation="relu"))
    model.add(Dense(num_classes))
    model.add(Activation("softmax"))

    print(model.summary())

    sgd = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=sgd)

    model.fit(x_train, y_train, epochs=1)
Example #17
0
def top_level_task():
    """Convert an in-memory PyTorch module (MLP) to FlexFlow layers via
    PyTorchModel.torch_to_ff, then train it on MNIST.
    """
    ffconfig = FFConfig()
    print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %
          (ffconfig.batch_size, ffconfig.workers_per_node, ffconfig.num_nodes))
    ffmodel = FFModel(ffconfig)

    dims = [ffconfig.batch_size, 784]
    input_tensor = ffmodel.create_tensor(dims, DataType.DT_FLOAT)

    num_samples = 60000

    # MLP is a torch.nn.Module defined elsewhere in this project.
    model = MLP()

    # Trace the torch module into FlexFlow operators.
    ff_torch_model = PyTorchModel(model)
    output_tensors = ff_torch_model.torch_to_ff(ffmodel, [input_tensor])

    ffoptimizer = SGDOptimizer(ffmodel, 0.01)
    ffmodel.optimizer = ffoptimizer
    ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY,
                    metrics=[
                        MetricsType.METRICS_ACCURACY,
                        MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY
                    ])
    # Label tensor is read only after compile().
    label_tensor = ffmodel.label_tensor

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    print(x_train.shape)
    # Flatten to 784-dim float32 in [0, 1]; labels as (N, 1) int32.
    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))

    dataloader_input = ffmodel.create_data_loader(input_tensor, x_train)
    dataloader_label = ffmodel.create_data_loader(label_tensor, y_train)

    ffmodel.init_layers()

    epochs = ffconfig.epochs

    ts_start = ffconfig.get_current_time()

    ffmodel.fit(x=dataloader_input, y=dataloader_label, epochs=epochs)

    ts_end = ffconfig.get_current_time()
    # 1e-6 scale: timestamps are presumably microseconds — TODO confirm.
    run_time = 1e-6 * (ts_end - ts_start)
    print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %
          (epochs, run_time, num_samples * epochs / run_time))
Example #18
0
def top_level_task():
    """Train a teacher MLP on MNIST, then build a student model from it."""
    num_classes = 10

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Flat float32 pixels in [0, 1]; labels as an (N, 1) int32 column.
    x_train = x_train.reshape(60000, 784).astype('float32')
    x_train /= 255
    y_train = np.reshape(y_train.astype('int32'), (len(y_train), 1))
    print("shape: ", x_train.shape)

    teacher = create_teacher_model_mlp(num_classes, x_train, y_train)
    create_student_model_mlp(teacher, num_classes, x_train, y_train)
Example #19
0
def top_level_task():
  """Teacher/student CNN pair trained on channels-first MNIST images."""
  num_classes = 10

  img_rows, img_cols = 28, 28

  (x_train, y_train), (x_test, y_test) = mnist.load_data()
  # (N, 1, 28, 28) channels-first layout, scaled to [0, 1].
  x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols).astype('float32')
  x_train /= 255
  # Labels as an (N, 1) int32 column.
  y_train = np.reshape(y_train.astype('int32'), (len(y_train), 1))

  teacher = create_teacher_model_cnn(num_classes, x_train, y_train)
  create_student_model_cnn(teacher, num_classes, x_train, y_train)
Example #20
0
def mlp():
  """Teacher/student MLP demo driver on flattened MNIST."""
  num_classes = 10

  (x_train, y_train), (x_test, y_test) = mnist.load_data()

  # Flatten and scale inputs; labels become an (N, 1) int32 column.
  x_train = x_train.reshape(60000, 784).astype('float32')
  x_train /= 255
  y_train = np.reshape(y_train.astype('int32'), (len(y_train), 1))
  print("shape: ", x_train.shape)

  teacher = create_teacher_model(num_classes, x_train, y_train)
  create_student_model(teacher, num_classes, x_train, y_train)
Example #21
0
def top_level_task():
    """Build and initialize an FFModel MLP, then introspect the first
    layer's weight tensor (no training is performed).
    """
    ffconfig = FFConfig()
    print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %
          (ffconfig.batch_size, ffconfig.workers_per_node, ffconfig.num_nodes))
    ffmodel = FFModel(ffconfig)

    dims_input = [ffconfig.batch_size, 784]
    input_tensor = ffmodel.create_tensor(dims_input, DataType.DT_FLOAT)

    num_samples = 60000

    # Seeded uniform init on the first dense layer only.
    kernel_init = UniformInitializer(12, -1, 1)
    t = ffmodel.dense(input_tensor,
                      512,
                      ActiMode.AC_MODE_RELU,
                      kernel_initializer=kernel_init)
    t = ffmodel.dense(t, 512, ActiMode.AC_MODE_RELU)
    t = ffmodel.dense(t, 10)

    t = ffmodel.softmax(t)

    ffoptimizer = SGDOptimizer(ffmodel, 0.01)
    ffmodel.optimizer = ffoptimizer
    ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY,
                    metrics=[
                        MetricsType.METRICS_ACCURACY,
                        MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY
                    ])
    # Label tensor is read only after compile().
    label_tensor = ffmodel.label_tensor

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    print(x_train.shape)
    # Flatten to 784-dim float32 in [0, 1]; labels as (N, 1) int32.
    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))

    dataloader_input = ffmodel.create_data_loader(input_tensor, x_train)
    dataloader_label = ffmodel.create_data_loader(label_tensor, y_train)

    ffmodel.init_layers()

    # Layer 0 is the first dense layer; dump it and its weight tensor.
    dense1 = ffmodel.get_layer_by_id(0)
    print(dense1)
    print(dense1.get_weight_tensor())
Example #22
0
def cnn_concat():
    """Two-branch MNIST CNN: parallel 3x3 convolutions over the same input
    are concatenated along the channel axis, then classified by a small
    conv/pool/dense head. Trains for one epoch with SGD."""
    num_classes = 10
    img_rows, img_cols = 28, 28

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # NCHW layout: (samples, 1, 28, 28), pixels scaled to [0, 1].
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))

    images = Input(batch_shape=[0, 1, 28, 28], dtype="float32")

    # Two identical parallel conv branches applied to the same input tensor.
    branches = [
        Conv2D(filters=32,
               input_shape=(1, 28, 28),
               kernel_size=(3, 3),
               strides=(1, 1),
               padding=(1, 1),
               activation="relu")(images) for _ in range(2)
    ]

    # Merge on the channel axis (axis=1 in NCHW), then the classifier head.
    features = Concatenate(axis=1)(branches)
    features = Conv2D(filters=64,
                      kernel_size=(3, 3),
                      strides=(1, 1),
                      padding=(1, 1),
                      activation="relu")(features)
    features = MaxPooling2D(pool_size=(2, 2), strides=(2, 2),
                            padding="valid")(features)
    features = Flatten()(features)
    features = Dense(128, activation="relu")(features)
    logits = Dense(num_classes)(features)
    probs = Activation("softmax")(logits)

    model = Model(images, probs)
    print(model.summary())

    model.compile(optimizer=flexflow.keras.optimizers.SGD(learning_rate=0.01))

    model.fit(x_train, y_train, epochs=1)
# Example #23
def top_level_task():
    """Train a 784-512-512-10 MLP on MNIST with the legacy FlexFlow API.

    Uses named layers, explicit attach/detach of full-dataset staging
    tensors, SingleDataLoader batch feeding, and a hand-written training
    loop wrapped in a Legion trace. Finally reads the label tensor back
    via inline mapping.
    """
    ffconfig = FFConfig()
    ffconfig.parse_args()
    print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %
          (ffconfig.get_batch_size(), ffconfig.get_workers_per_node(),
           ffconfig.get_num_nodes()))
    ffmodel = FFModel(ffconfig)

    dims1 = [ffconfig.get_batch_size(), 784]
    input1 = ffmodel.create_tensor(dims1, "", DataType.DT_FLOAT)

    dims_label = [ffconfig.get_batch_size(), 1]
    label = ffmodel.create_tensor(dims_label, "", DataType.DT_INT32)

    num_samples = 60000

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    print(x_train.shape)
    # Flatten to (60000, 784), scale to [0, 1]; labels become (60000, 1) int32.
    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))
    print(x_train.shape[0], 'train samples')
    print(y_train.shape)

    # Full-dataset staging tensors: numpy arrays are attached only long
    # enough for the data loaders to capture the data, then detached.
    dims_full_input = [num_samples, 784]
    full_input = ffmodel.create_tensor(dims_full_input, "", DataType.DT_FLOAT)

    dims_full_label = [num_samples, 1]
    full_label = ffmodel.create_tensor(dims_full_label, "", DataType.DT_INT32)

    full_input.attach_numpy_array(ffconfig, x_train)
    full_label.attach_numpy_array(ffconfig, y_train)
    print(y_train)

    #dataloader = DataLoader2D(ffmodel, input1, label, full_input, full_label, num_samples)
    dataloader_input = SingleDataLoader(ffmodel, input1, full_input,
                                        num_samples, DataType.DT_FLOAT)
    dataloader_label = SingleDataLoader(ffmodel, label, full_label,
                                        num_samples, DataType.DT_INT32)

    full_input.detach_numpy_array(ffconfig)
    full_label.detach_numpy_array(ffconfig)

    # BUGFIX: all three layers were named "dense1"; give each a unique name.
    t2 = ffmodel.dense("dense1", input1, 512, ActiMode.AC_MODE_RELU)
    t3 = ffmodel.dense("dense2", t2, 512, ActiMode.AC_MODE_RELU)
    t4 = ffmodel.dense("dense3", t3, 10)
    t5 = ffmodel.softmax("softmax", t4, label)

    ffoptimizer = SGDOptimizer(ffmodel, 0.01)
    ffmodel.set_sgd_optimizer(ffoptimizer)

    ffmodel.init_layers()

    epochs = ffconfig.get_epochs()

    ts_start = ffconfig.get_current_time()
    for epoch in range(epochs):
        dataloader_input.reset()
        dataloader_label.reset()
        # dataloader.reset()
        ffmodel.reset_metrics()
        # Integer division (was float division + int()): any ragged final
        # partial batch is dropped.
        iterations = num_samples // ffconfig.get_batch_size()
        for step in range(iterations):  # renamed from 'iter' (shadowed builtin)
            dataloader_input.next_batch(ffmodel)
            dataloader_label.next_batch(ffmodel)
            #dataloader.next_batch(ffmodel)
            # Capture steady-state iterations as a Legion trace; epoch 0 is
            # left untraced so one-time warm-up work isn't baked in.
            if epoch > 0:
                ffconfig.begin_trace(111)
            ffmodel.forward()
            ffmodel.zero_gradients()
            ffmodel.backward()
            ffmodel.update()
            if epoch > 0:
                ffconfig.end_trace(111)

    ts_end = ffconfig.get_current_time()
    run_time = 1e-6 * (ts_end - ts_start)
    print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %
          (epochs, run_time, num_samples * epochs / run_time))

    dense1 = ffmodel.get_layer_by_id(0)

    # Inline-map the label tensor to read its contents back as an array.
    dbias_tensor = label  #dense1.get_bias_tensor()
    dbias_tensor.inline_map(ffconfig)
    dbias = dbias_tensor.get_array(ffconfig, DataType.DT_INT32)
    print(dbias.shape)
    print(dbias)
    dbias_tensor.inline_unmap(ffconfig)
# Example #24
def top_level_task():
    """Train a 784-512-512-10 MLP on MNIST, feeding batches manually.

    Batches are pushed with next_batch()/next_batch_label(); training uses a
    hand-written loop with Legion tracing. Fails via assert if the final
    accuracy stays below 65%, then reads back the label tensor.
    """
    # NOTE(review): alexnetconfig looks unused here, but NetConfig() may read
    # command-line/config state as a side effect -- kept; confirm before removing.
    alexnetconfig = NetConfig()
    ffconfig = FFConfig()
    ffconfig.parse_args()
    print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %
          (ffconfig.get_batch_size(), ffconfig.get_workers_per_node(),
           ffconfig.get_num_nodes()))
    ffmodel = FFModel(ffconfig)

    dims_input = [ffconfig.get_batch_size(), 784]
    input_tensor = ffmodel.create_tensor(dims_input, DataType.DT_FLOAT)

    num_samples = 60000

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    print(x_train.shape)
    # Flatten to (60000, 784), scale to [0, 1]; labels become (60000, 1) int32.
    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))
    print(x_train.shape[0], 'train samples')
    print(y_train.shape)

    t2 = ffmodel.dense(input_tensor, 512, ActiMode.AC_MODE_RELU)
    t3 = ffmodel.dense(t2, 512, ActiMode.AC_MODE_RELU)
    t4 = ffmodel.dense(t3, 10)
    t5 = ffmodel.softmax(t4)

    ffoptimizer = SGDOptimizer(ffmodel, 0.01)
    ffmodel.set_sgd_optimizer(ffoptimizer)
    ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY,
                    metrics=[
                        MetricsType.METRICS_ACCURACY,
                        MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY
                    ])
    label_tensor = ffmodel.get_label_tensor()

    # Prime the input/label tensors with batch 0 before init_layers().
    # NOTE(review): batch 0 is fed again by the first loop iteration below.
    next_batch(0, x_train, input_tensor, ffconfig, ffmodel)
    next_batch_label(0, y_train, label_tensor, ffconfig, ffmodel)

    ffmodel.init_layers()

    epochs = ffconfig.get_epochs()

    ts_start = ffconfig.get_current_time()
    for epoch in range(epochs):
        ct = 0
        ffmodel.reset_metrics()
        # Integer division (was float division + int()): any ragged final
        # partial batch is dropped.
        iterations = num_samples // ffconfig.get_batch_size()
        for step in range(iterations):  # renamed from 'iter' (shadowed builtin)
            next_batch(ct, x_train, input_tensor, ffconfig, ffmodel)
            next_batch_label(ct, y_train, label_tensor, ffconfig, ffmodel)
            ct += 1
            # Trace steady-state iterations only (skip warm-up epoch 0).
            if epoch > 0:
                ffconfig.begin_trace(111)
            ffmodel.forward()
            ffmodel.zero_gradients()
            ffmodel.backward()
            ffmodel.update()
            if epoch > 0:
                ffconfig.end_trace(111)

    ts_end = ffconfig.get_current_time()
    run_time = 1e-6 * (ts_end - ts_start)
    print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %
          (epochs, run_time, num_samples * epochs / run_time))

    perf_metrics = ffmodel.get_perf_metrics()
    accuracy = perf_metrics.get_accuracy()
    if accuracy < 65:
        assert 0, 'Check Accuracy'

    dense1 = ffmodel.get_layer_by_id(0)

    # Inline-map the label tensor to read its contents back as an array.
    dbias_tensor = label_tensor  #dense1.get_bias_tensor()
    dbias_tensor.inline_map(ffconfig)
    dbias = dbias_tensor.get_array(ffconfig, DataType.DT_INT32)
    print(dbias.shape)
    print(dbias)
    dbias_tensor.inline_unmap(ffconfig)
def top_level_task():
    """Teacher/student MNIST MLP: train a teacher, copy its dense-layer
    weights into an identically shaped student, then continue training the
    student with accuracy-verification callbacks."""
    num_classes = 10

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Flatten to (60000, 784), scale to [0, 1]; labels become (60000, 1) int32.
    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))
    print("shape: ", x_train.shape)

    def build_mlp():
        # Fresh input + three dense layers; the layer objects are returned
        # alongside the compiled model so their weights can be read/written.
        inputs = Input(shape=(784, ), dtype="float32")
        layers = (Dense(512, input_shape=(784, ), activation="relu"),
                  Dense(512, activation="relu"),
                  Dense(num_classes))
        x = inputs
        for layer in layers:
            x = layer(x)
        x = Activation("softmax")(x)
        model = Model(inputs, x)
        model.compile(
            optimizer=flexflow.keras.optimizers.SGD(learning_rate=0.01),
            loss='sparse_categorical_crossentropy',
            metrics=['accuracy', 'sparse_categorical_crossentropy'])
        return model, layers

    # teacher
    teacher_model, teacher_layers = build_mlp()
    teacher_model.fit(x_train, y_train, epochs=10)

    # Snapshot (kernel, bias) pairs from the trained teacher.
    weights = [
        layer.get_weights(teacher_model.ffmodel) for layer in teacher_layers
    ]

    # student
    student_model, student_layers = build_mlp()
    for layer, (kernel, bias) in zip(student_layers, weights):
        layer.set_weights(student_model.ffmodel, kernel, bias)

    student_model.fit(x_train,
                      y_train,
                      epochs=160,
                      callbacks=[
                          VerifyMetrics(ModelAccuracy.MNIST_MLP),
                          EpochVerifyMetrics(ModelAccuracy.MNIST_MLP)
                      ])
# Example #26
def top_level_task():
    """Load an MLP graph from "mlp.ff", attach a softmax head, and train it.

    The model topology comes from construct_model_from_file; the MNIST data
    is staged through attach/detach full-dataset tensors and fed by
    SingleDataLoader in a hand-written training loop with Legion tracing.
    """
    ffconfig = FFConfig()
    ffconfig.parse_args()
    print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %
          (ffconfig.get_batch_size(), ffconfig.get_workers_per_node(),
           ffconfig.get_num_nodes()))
    ffmodel = FFModel(ffconfig)

    dims = [ffconfig.get_batch_size(), 784]
    input_tensor = ffmodel.create_tensor(dims, "", DataType.DT_FLOAT)

    num_samples = 60000

    # Model topology is deserialized from the "mlp.ff" description file.
    output_tensors = ffmodel.construct_model_from_file([input_tensor],
                                                       "mlp.ff")

    t = ffmodel.softmax(output_tensors[0])

    ffoptimizer = SGDOptimizer(ffmodel, 0.01)
    ffmodel.set_sgd_optimizer(ffoptimizer)
    ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY,
                    metrics=[
                        MetricsType.METRICS_ACCURACY,
                        MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY
                    ])
    label = ffmodel.get_label_tensor()

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    print(x_train.shape)
    # Flatten to (60000, 784), scale to [0, 1]; labels become (60000, 1) int32.
    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))

    # Stage the full dataset, let the loaders capture it, then detach.
    dims_full_input = [num_samples, 784]
    full_input = ffmodel.create_tensor(dims_full_input, "", DataType.DT_FLOAT)

    dims_full_label = [num_samples, 1]
    full_label = ffmodel.create_tensor(dims_full_label, "", DataType.DT_INT32)

    full_input.attach_numpy_array(ffconfig, x_train)
    full_label.attach_numpy_array(ffconfig, y_train)

    dataloader_input = SingleDataLoader(ffmodel, input_tensor, full_input,
                                        num_samples, DataType.DT_FLOAT)
    dataloader_label = SingleDataLoader(ffmodel, label, full_label,
                                        num_samples, DataType.DT_INT32)

    full_input.detach_numpy_array(ffconfig)
    full_label.detach_numpy_array(ffconfig)

    ffmodel.init_layers()

    epochs = ffconfig.get_epochs()

    ts_start = ffconfig.get_current_time()
    for epoch in range(epochs):
        dataloader_input.reset()
        dataloader_label.reset()
        ffmodel.reset_metrics()
        # Integer division (was float division + int()): any ragged final
        # partial batch is dropped.
        iterations = num_samples // ffconfig.get_batch_size()
        for step in range(iterations):  # renamed from 'iter' (shadowed builtin)
            dataloader_input.next_batch(ffmodel)
            dataloader_label.next_batch(ffmodel)
            # Trace steady-state iterations only (skip warm-up epoch 0).
            if epoch > 0:
                ffconfig.begin_trace(111)
            ffmodel.forward()
            ffmodel.zero_gradients()
            ffmodel.backward()
            ffmodel.update()
            if epoch > 0:
                ffconfig.end_trace(111)

    ts_end = ffconfig.get_current_time()
    run_time = 1e-6 * (ts_end - ts_start)
    print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %
          (epochs, run_time, num_samples * epochs / run_time))
# Example #27
def top_level_task():
    """MNIST MLP trained via the FFModel.train()/eval() convenience API.

    Builds a 784-512-512-10 network, stages MNIST through attach/detach
    full-dataset tensors, trains and evaluates, then asserts the reported
    accuracy reaches the MNIST-MLP reference threshold.
    """
    ffconfig = FFConfig()
    ffconfig.parse_args()
    print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %
          (ffconfig.get_batch_size(), ffconfig.get_workers_per_node(),
           ffconfig.get_num_nodes()))
    ffmodel = FFModel(ffconfig)

    input_tensor = ffmodel.create_tensor([ffconfig.get_batch_size(), 784],
                                         DataType.DT_FLOAT)

    num_samples = 60000

    # First layer gets an explicit uniform(-1, 1) kernel initializer (seed 12).
    kernel_init = UniformInitializer(12, -1, 1)
    hidden = ffmodel.dense(input_tensor,
                           512,
                           ActiMode.AC_MODE_RELU,
                           kernel_initializer=kernel_init)
    hidden = ffmodel.dense(hidden, 512, ActiMode.AC_MODE_RELU)
    logits = ffmodel.dense(hidden, 10)
    probs = ffmodel.softmax(logits)

    ffmodel.set_sgd_optimizer(SGDOptimizer(ffmodel, 0.01))
    ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY,
                    metrics=[
                        MetricsType.METRICS_ACCURACY,
                        MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY
                    ])
    label_tensor = ffmodel.get_label_tensor()

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    print(x_train.shape)
    # Flatten to (60000, 784), scale to [0, 1]; labels become (60000, 1) int32.
    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))

    # Stage the full dataset, let the loaders capture it, then detach.
    full_input = ffmodel.create_tensor([num_samples, 784], DataType.DT_FLOAT)
    full_label = ffmodel.create_tensor([num_samples, 1], DataType.DT_INT32)

    full_input.attach_numpy_array(ffconfig, x_train)
    full_label.attach_numpy_array(ffconfig, y_train)

    dataloader_input = SingleDataLoader(ffmodel, input_tensor, full_input,
                                        num_samples, DataType.DT_FLOAT)
    dataloader_label = SingleDataLoader(ffmodel, label_tensor, full_label,
                                        num_samples, DataType.DT_INT32)

    full_input.detach_numpy_array(ffconfig)
    full_label.detach_numpy_array(ffconfig)

    ffmodel.init_layers()

    epochs = ffconfig.get_epochs()

    ts_start = ffconfig.get_current_time()

    ffmodel.train((dataloader_input, dataloader_label), epochs)
    ffmodel.eval((dataloader_input, dataloader_label))

    ts_end = ffconfig.get_current_time()
    run_time = 1e-6 * (ts_end - ts_start)
    print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %
          (epochs, run_time, num_samples * epochs / run_time))

    perf_metrics = ffmodel.get_perf_metrics()
    accuracy = perf_metrics.get_accuracy()
    if accuracy < ModelAccuracy.MNIST_MLP.value:
        assert 0, 'Check Accuracy'
# Example #28
def top_level_task():
    """Train a small MNIST CNN (2 conv, 1 pool, 2 dense) with the legacy
    FlexFlow API and a hand-written training loop; verify final accuracy,
    then read the label and input tensors back via inline mapping."""
    ffconfig = FFConfig()
    ffconfig.parse_args()
    print("Python API batchSize(%d) workersPerNodes(%d) numNodes(%d)" %
          (ffconfig.get_batch_size(), ffconfig.get_workers_per_node(),
           ffconfig.get_num_nodes()))
    ffmodel = FFModel(ffconfig)

    # Symbolic input: NCHW (batch, 1, 28, 28) float tensor.
    dims1 = [ffconfig.get_batch_size(), 1, 28, 28]
    input1 = ffmodel.create_tensor(dims1, "", DataType.DT_FLOAT)

    # dims_label = [ffconfig.get_batch_size(), 1]
    # label = ffmodel.create_tensor(dims_label, "", DataType.DT_INT32);

    num_samples = 60000

    # conv(32) -> conv(64) -> 2x2 max-pool -> flatten -> dense(128) -> dense(10).
    t = ffmodel.conv2d(input1, 32, 3, 3, 1, 1, 1, 1, ActiMode.AC_MODE_RELU,
                       True)
    t = ffmodel.conv2d(t, 64, 3, 3, 1, 1, 1, 1, ActiMode.AC_MODE_RELU, True)
    t = ffmodel.pool2d(t, 2, 2, 2, 2, 0, 0)
    t = ffmodel.flat(t)
    t = ffmodel.dense(t, 128, ActiMode.AC_MODE_RELU)
    t = ffmodel.dense(t, 10)
    t = ffmodel.softmax(t)

    ffoptimizer = SGDOptimizer(ffmodel, 0.01)
    ffmodel.set_sgd_optimizer(ffoptimizer)
    ffmodel.compile(loss_type=LossType.LOSS_SPARSE_CATEGORICAL_CROSSENTROPY,
                    metrics=[
                        MetricsType.METRICS_ACCURACY,
                        MetricsType.METRICS_SPARSE_CATEGORICAL_CROSSENTROPY
                    ])
    # Label tensor is created by compile(); fetched rather than hand-built.
    label = ffmodel.get_label_tensor()

    # Reshape MNIST to NCHW, scale to [0, 1]; labels become (60000, 1) int32.
    img_rows, img_cols = 28, 28
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))

    # Full-dataset staging tensors: numpy arrays are attached only long
    # enough for the data loaders to capture the data, then detached.
    dims_full_input = [num_samples, 1, 28, 28]
    full_input = ffmodel.create_tensor(dims_full_input, "", DataType.DT_FLOAT)

    dims_full_label = [num_samples, 1]
    full_label = ffmodel.create_tensor(dims_full_label, "", DataType.DT_INT32)

    full_input.attach_numpy_array(ffconfig, x_train)
    full_label.attach_numpy_array(ffconfig, y_train)
    print(y_train)

    #dataloader = DataLoader2D(ffmodel, input1, label, full_input, full_label, num_samples)
    dataloader_input = SingleDataLoader(ffmodel, input1, full_input,
                                        num_samples, DataType.DT_FLOAT)
    dataloader_label = SingleDataLoader(ffmodel, label, full_label,
                                        num_samples, DataType.DT_INT32)

    full_input.detach_numpy_array(ffconfig)
    full_label.detach_numpy_array(ffconfig)

    ffmodel.init_layers()

    epochs = ffconfig.get_epochs()

    ts_start = ffconfig.get_current_time()
    for epoch in range(0, epochs):
        dataloader_input.reset()
        dataloader_label.reset()
        # dataloader.reset()
        ffmodel.reset_metrics()
        # NOTE: float division + int() truncation; drops any partial batch.
        iterations = num_samples / ffconfig.get_batch_size()
        for iter in range(0, int(iterations)):
            dataloader_input.next_batch(ffmodel)
            dataloader_label.next_batch(ffmodel)
            #dataloader.next_batch(ffmodel)
            # Trace steady-state iterations only; epoch 0 is untraced warm-up.
            if (epoch > 0):
                ffconfig.begin_trace(111)
            ffmodel.forward()
            ffmodel.zero_gradients()
            ffmodel.backward()
            ffmodel.update()
            if (epoch > 0):
                ffconfig.end_trace(111)

    ts_end = ffconfig.get_current_time()
    run_time = 1e-6 * (ts_end - ts_start)
    print("epochs %d, ELAPSED TIME = %.4fs, THROUGHPUT = %.2f samples/s\n" %
          (epochs, run_time, num_samples * epochs / run_time))

    # Fail the run if accuracy falls short of the MNIST-CNN reference value.
    perf_metrics = ffmodel.get_perf_metrics()
    accuracy = perf_metrics.get_accuracy()
    if accuracy < ModelAccuracy.MNIST_CNN.value:
        assert 0, 'Check Accuracy'

    dense1 = ffmodel.get_layer_by_id(0)

    # Inline-map label and input tensors to read their contents back.
    label.inline_map(ffconfig)
    label_array = label.get_array(ffconfig, DataType.DT_INT32)
    print(label_array.shape)
    print(label_array)
    label.inline_unmap(ffconfig)

    input1.inline_map(ffconfig)
    input1_array = input1.get_array(ffconfig, DataType.DT_FLOAT)
    print(input1_array.shape)
    print(input1_array[10, :, :, :])
    input1.inline_unmap(ffconfig)