예제 #1
0
def top_level_task():
    """Train a small CNN on MNIST with FlexFlow's Keras-style Sequential API.

    Loads MNIST, reshapes images to channel-first (N, 1, 28, 28), scales
    pixels to [0, 1], and fits for one epoch with SGD.
    """
    num_classes = 10

    img_rows, img_cols = 28, 28

    # Only the training split is consumed; x_test/y_test are unused here.
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)

    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    # Labels are reshaped to an (N, 1) int32 column vector before fit().
    y_train = np.reshape(y_train, (len(y_train), 1))
    print("shape: ", x_train.shape, x_train.__array_interface__["strides"])

    layers = [
        Conv2D(filters=32,
               input_shape=(1, 28, 28),
               kernel_size=(3, 3),
               strides=(1, 1),
               padding=(1, 1),
               activation="relu"),
        Conv2D(filters=64,
               kernel_size=(3, 3),
               strides=(1, 1),
               padding=(1, 1),
               activation="relu"),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"),
        Flatten(),
        Dense(128, activation="relu"),
        Dense(num_classes),
        Activation("softmax")
    ]
    model = Sequential(layers)

    print(model.summary())

    opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=opt)

    model.fit(x_train, y_train, epochs=1)
예제 #2
0
def top_level_task():
    """Train a CNN classifier on MNIST and verify its accuracy.

    The network is declared layer-by-layer starting from an explicit
    Input, compiled with SGD + sparse categorical cross-entropy, and fit
    for 5 epochs under accuracy-verification callbacks.
    """
    num_classes = 10

    img_rows, img_cols = 28, 28

    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    # Channel-first layout: (samples, channels, height, width).
    x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)

    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))
    print("shape: ", x_train.shape, x_train.__array_interface__["strides"])

    model = Sequential()
    model.add(Input(shape=(1, 28, 28), dtype="float32"))
    model.add(Conv2D(filters=32,
                     kernel_size=(3, 3),
                     strides=(1, 1),
                     padding=(1, 1),
                     activation="relu"))
    model.add(Conv2D(filters=64,
                     kernel_size=(3, 3),
                     strides=(1, 1),
                     padding=(1, 1),
                     activation="relu"))
    model.add(MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"))
    model.add(Flatten())
    model.add(Dense(128, activation="relu"))
    model.add(Dense(num_classes))
    model.add(Activation("softmax"))

    opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy', 'sparse_categorical_crossentropy'])

    print(model.summary())

    model.fit(x_train,
              y_train,
              epochs=5,
              callbacks=[
                  VerifyMetrics(ModelAccuracy.MNIST_CNN),
                  EpochVerifyMetrics(ModelAccuracy.MNIST_CNN)
              ])
예제 #3
0
def create_student_model_cnn(teacher_model, num_classes, x_train, y_train):
  """Build a student CNN seeded with a trained teacher's weights.

  Reads the conv/dense kernels and biases out of ``teacher_model``, builds
  a fresh CNN with the same topology, copies the weights in after
  compiling, and trains for 5 epochs under accuracy-verification callbacks.

  Args:
    teacher_model: trained FlexFlow Keras model exposing get_layer()/ffmodel.
    num_classes: size of the final Dense/softmax output.
    x_train: training images — presumably (N, 1, 28, 28) float32; confirm
      against the caller.
    y_train: int32 labels, presumably shape (N, 1).
  """
  # Teacher layer indices used: 0/1 are the two convs, 4/5 the two Dense
  # layers; indices 2 (pool) and 3 (flatten) are skipped.
  conv1 = teacher_model.get_layer(index=0)
  c1_kernel, c1_bias = conv1.get_weights(teacher_model.ffmodel)
  print(c1_kernel.shape, c1_bias.shape)

  conv2 = teacher_model.get_layer(index=1)
  c2_kernel, c2_bias = conv2.get_weights(teacher_model.ffmodel)

  dense1 = teacher_model.get_layer(index=4)
  d1_kernel, d1_bias = dense1.get_weights(teacher_model.ffmodel)

  dense2 = teacher_model.get_layer(index=5)
  d2_kernel, d2_bias = dense2.get_weights(teacher_model.ffmodel)

  model = Sequential()
  model.add(Conv2D(filters=32, input_shape=(1,28,28), kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu"))
  model.add(Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu"))
  model.add(MaxPooling2D(pool_size=(2,2), strides=(2,2), padding="valid"))
  model.add(Flatten())
  model.add(Dense(128, activation="relu", name="dense1"))
  model.add(Dense(num_classes))
  model.add(Activation("softmax"))

  opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
  model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy', 'sparse_categorical_crossentropy'])

  # NOTE(review): weights are copied only after compile(), presumably
  # because model.ffmodel exists only then — confirm against FlexFlow docs.
  conv1s = model.get_layer(index=0)
  conv2s = model.get_layer(index=1)
  dense1s = model.get_layer(name="dense1")  # looked up by name, not index
  dense2s = model.get_layer(index=5)

  conv1s.set_weights(model.ffmodel, c1_kernel, c1_bias)
  conv2s.set_weights(model.ffmodel, c2_kernel, c2_bias)
  dense1s.set_weights(model.ffmodel, d1_kernel, d1_bias)
  dense2s.set_weights(model.ffmodel, d2_kernel, d2_bias)

  print(model.summary())

  model.fit(x_train, y_train, epochs=5, callbacks=[VerifyMetrics(ModelAccuracy.MNIST_CNN), EpochVerifyMetrics(ModelAccuracy.MNIST_CNN)])
예제 #4
0
def create_teacher_model_cnn(num_classes, x_train, y_train):
  """Build, compile, and train the teacher CNN; return the fitted model."""
  teacher_layers = [
      Conv2D(filters=32, input_shape=(1, 28, 28), kernel_size=(3, 3),
             strides=(1, 1), padding=(1, 1), activation="relu"),
      Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
             padding=(1, 1), activation="relu"),
      MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"),
      Flatten(),
      Dense(128, activation="relu"),
      Dense(num_classes),
      Activation("softmax"),
  ]
  model = Sequential(teacher_layers)

  opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
  model.compile(optimizer=opt,
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy', 'sparse_categorical_crossentropy'])

  print(model.summary())

  model.fit(x_train, y_train, epochs=5)
  return model
예제 #5
0
def top_level_task():
    """Train a deeper CNN on CIFAR-10 and verify the resulting accuracy."""
    num_classes = 10

    num_samples = 10000

    (x_train, y_train), (x_test, y_test) = cifar10.load_data(num_samples)

    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    print("shape: ", x_train.shape)

    # Two conv/conv/pool stages followed by a small MLP head.
    model = Sequential([
        Conv2D(filters=32, input_shape=(3, 32, 32), kernel_size=(3, 3),
               strides=(1, 1), padding=(1, 1), activation="relu"),
        Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1),
               padding=(1, 1), activation="relu"),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"),
        Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
               padding=(1, 1), activation="relu"),
        Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
               padding=(1, 1), activation="relu"),
        MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"),
        Flatten(),
        Dense(512, activation="relu"),
        Dense(num_classes),
        Activation("softmax"),
    ])

    opt = flexflow.keras.optimizers.SGD(learning_rate=0.02)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy', 'sparse_categorical_crossentropy'])
    print(model.summary())

    model.fit(x_train,
              y_train,
              epochs=30,
              callbacks=[
                  VerifyMetrics(ModelAccuracy.CIFAR10_CNN),
                  EpochVerifyMetrics(ModelAccuracy.CIFAR10_CNN)
              ])
예제 #6
0
def top_level_task():
    """Train a three-Dense-layer MLP on flattened MNIST for one epoch."""
    num_classes = 10

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Flatten the 28x28 images to 784-dim vectors and scale into [0, 1].
    x_train = x_train.reshape(60000, 784)
    x_train = x_train.astype('float32')
    x_train /= 255
    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))
    print("shape: ", x_train.shape)

    model = Sequential([
        Dense(512, input_shape=(784, ), activation="relu"),
        Dense(512, activation="relu"),
        Dense(num_classes),
        Activation("softmax"),
    ])

    print(model.summary())

    opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=opt)

    model.fit(x_train, y_train, epochs=1)
def top_level_task():
  """Two-branch CIFAR-10 CNN built with the functional API.

  Two identical Sequential conv stems process the same input; their
  outputs are concatenated along the channel axis and fed through a
  shared conv/MLP head. The same image batch is passed to both branches.
  """
  num_classes = 10

  num_samples = 10000

  (x_train, y_train), (x_test, y_test) = cifar10.load_data(num_samples)

  x_train = x_train.astype('float32')
  x_train /= 255
  y_train = y_train.astype('int32')
  print("shape: ", x_train.shape)

  # Branch 1: two 3x3 convs over the channel-first (3, 32, 32) input.
  model1 = Sequential()
  model1.add(Conv2D(filters=32, input_shape=(3,32,32), kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu", name="conv2d_0_0"))
  model1.add(Conv2D(filters=32, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu", name="conv2d_1_0"))
  print(model1.summary())

  # Branch 2: same topology as branch 1, distinct layer names (and weights).
  model2 = Sequential()
  model2.add(Conv2D(filters=32, input_shape=(3,32,32), kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu", name="conv2d_0_1"))
  model2.add(Conv2D(filters=32, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu", name="conv2d_1_1"))
  print(model2.summary())

  # Merge branch outputs on axis=1 (the channel axis for this layout),
  # then run the shared convolutional head and classifier.
  output_tensor = Concatenate(axis=1)([model1.output, model2.output])
  output_tensor = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding="valid")(output_tensor)
  output_tensor = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu", name="conv2d_0_4")(output_tensor)
  output_tensor = Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding=(1,1), activation="relu")(output_tensor)
  output_tensor = MaxPooling2D(pool_size=(2,2), strides=(2,2), padding="valid")(output_tensor)
  output_tensor = Flatten()(output_tensor)
  output_tensor = Dense(512, activation="relu")(output_tensor)
  output_tensor = Dense(num_classes)(output_tensor)
  output_tensor = Activation("softmax")(output_tensor)

  # Functional Model over both branch inputs. NOTE(review): `.input` is
  # indexed with [0], which implies FlexFlow's Sequential exposes its
  # inputs as a list — confirm against the FlexFlow Python API.
  model = Model([model1.input[0], model2.input[0]], output_tensor)

  opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
  model.compile(optimizer=opt, loss='sparse_categorical_crossentropy', metrics=['accuracy', 'sparse_categorical_crossentropy'])
  print(model.summary())

  # x_train is passed twice — once per branch.
  model.fit([x_train, x_train], y_train, epochs=40, callbacks=[VerifyMetrics(ModelAccuracy.CIFAR10_CNN), EpochVerifyMetrics(ModelAccuracy.CIFAR10_CNN)])
예제 #8
0
def top_level_task():
  """Train a CIFAR-10 CNN for a single epoch with a default-loss compile()."""
  num_classes = 10

  num_samples = 10000

  (x_train, y_train), (x_test, y_test) = cifar10.load_data(num_samples)

  x_train = x_train.astype('float32')
  x_train /= 255
  y_train = y_train.astype('int32')
  print("shape: ", x_train.shape)

  model = Sequential([
      Conv2D(filters=32, input_shape=(3, 32, 32), kernel_size=(3, 3),
             strides=(1, 1), padding=(1, 1), activation="relu"),
      Conv2D(filters=32, kernel_size=(3, 3), strides=(1, 1),
             padding=(1, 1), activation="relu"),
      MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"),
      Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
             padding=(1, 1), activation="relu"),
      Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
             padding=(1, 1), activation="relu"),
      MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"),
      Flatten(),
      Dense(512, activation="relu"),
      Dense(num_classes),
      Activation("softmax"),
  ])

  print(model.summary())

  opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
  model.compile(optimizer=opt)

  model.fit(x_train, y_train, epochs=1)
예제 #9
0
def top_level_task():
    """Train an MLP text classifier on the Reuters newswire dataset.

    Sequences are vectorized to binary bag-of-words rows before training
    with Adam; a metrics-verification callback checks the final accuracy.
    """
    max_words = 1000
    epochs = 5

    print('Loading data...')
    (x_train, y_train), (x_test,
                         y_test) = reuters.load_data(num_words=max_words,
                                                     test_split=0.2)
    print(len(x_train), 'train sequences')
    print(len(x_test), 'test sequences')

    # Labels are 0-based topic ids, so the class count is max + 1.
    num_classes = np.max(y_train) + 1
    print(num_classes, 'classes')

    print('Vectorizing sequence data...')
    tokenizer = Tokenizer(num_words=max_words)
    x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
    x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
    x_train = x_train.astype('float32')
    print('x_train shape:', x_train.shape)
    print('x_test shape:', x_test.shape)

    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))
    print('y_train shape:', y_train.shape)

    model = Sequential([
        Input(shape=(max_words, )),
        Dense(512, activation="relu"),
        Dense(num_classes),
        Activation("softmax"),
    ])

    opt = flexflow.keras.optimizers.Adam(learning_rate=0.01)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy', 'sparse_categorical_crossentropy'])
    print(model.summary())

    model.fit(x_train,
              y_train,
              epochs=epochs,
              callbacks=[VerifyMetrics(ModelAccuracy.REUTERS_MLP)])
def top_level_task():
  """Train an MLP with dropout on MNIST, verify accuracy, then evaluate."""
  num_classes = 10

  (x_train, y_train), (x_test, y_test) = mnist.load_data()

  x_train = x_train.reshape(60000, 784)
  x_train = x_train.astype('float32')
  x_train /= 255
  y_train = y_train.astype('int32')
  y_train = np.reshape(y_train, (len(y_train), 1))
  print("shape: ", x_train.shape)

  # First Dense layer gets deterministic initializers (seeded GlorotUniform
  # kernel, zero bias); the remaining layers use defaults.
  model = Sequential()
  model.add(Dense(512,
                  input_shape=(784,),
                  kernel_initializer=GlorotUniform(123),
                  bias_initializer=Zeros()))
  model.add(Activation('relu'))
  model.add(Dropout(0.2))
  model.add(Dense(512, activation="relu"))
  model.add(Dropout(0.2))
  model.add(Dense(num_classes))
  model.add(Activation("softmax"))

  opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
  model.compile(optimizer=opt,
                loss='sparse_categorical_crossentropy',
                metrics=['accuracy', 'sparse_categorical_crossentropy'])

  print(model.summary())

  model.fit(x_train,
            y_train,
            epochs=20,
            callbacks=[VerifyMetrics(ModelAccuracy.MNIST_MLP),
                       EpochVerifyMetrics(ModelAccuracy.MNIST_MLP)])
  model.evaluate(x=x_train, y=y_train)
예제 #11
0
def create_student_model_cnn(teacher_model, num_classes, x_train, y_train):
  """Clone the teacher CNN's topology, copy its weights, and train one epoch."""
  teacher_ff = teacher_model.ffmodel

  # Teacher layers read: convs at positions 0/1, Dense at 4/5; positions
  # 2 and 3 (pool, flatten) are skipped.
  c1_kernel, c1_bias = teacher_model.get_layer(0).get_weights(teacher_ff)
  print(c1_kernel.shape, c1_bias.shape)

  c2_kernel, c2_bias = teacher_model.get_layer(1).get_weights(teacher_ff)
  d1_kernel, d1_bias = teacher_model.get_layer(4).get_weights(teacher_ff)
  d2_kernel, d2_bias = teacher_model.get_layer(5).get_weights(teacher_ff)

  model = Sequential([
      Conv2D(filters=32, input_shape=(1, 28, 28), kernel_size=(3, 3),
             strides=(1, 1), padding=(1, 1), activation="relu"),
      Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1),
             padding=(1, 1), activation="relu"),
      MaxPooling2D(pool_size=(2, 2), strides=(2, 2), padding="valid"),
      Flatten(),
      Dense(128, activation="relu"),
      Dense(num_classes),
      Activation("softmax"),
  ])

  opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
  model.compile(optimizer=opt)

  # Copy the teacher weights into the matching student layers.
  for idx, (kernel, bias) in [(0, (c1_kernel, c1_bias)),
                              (1, (c2_kernel, c2_bias)),
                              (4, (d1_kernel, d1_bias)),
                              (5, (d2_kernel, d2_bias))]:
    model.get_layer(idx).set_weights(model.ffmodel, kernel, bias)

  print(model.summary())

  model.fit(x_train, y_train, epochs=1)
예제 #12
0
def create_teacher_model(num_classes, x_train, y_train):
  """Train a three-Dense-layer teacher MLP for one epoch and return it."""
  model = Sequential([
      Dense(512, input_shape=(784,), activation="relu"),
      Dense(512, activation="relu"),
      Dense(num_classes),
      Activation("softmax"),
  ])

  opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
  model.compile(optimizer=opt)

  model.fit(x_train, y_train, epochs=1)

  # Dump the last Dense layer's parameters for inspection; the transpose-
  # style reshape is local to the printout and does not touch the model.
  d3_kernel, d3_bias = model.get_layer(2).get_weights(model.ffmodel)
  print(d3_bias)
  d3_kernel = np.reshape(d3_kernel, (d3_kernel.shape[1], d3_kernel.shape[0]))
  print(d3_kernel)
  return model
예제 #13
0
def create_student_model(teacher_model, num_classes, x_train, y_train):
  """Build a student MLP initialized with the teacher's Dense weights.

  Copies kernels/biases from the teacher's three Dense layers (indices
  0-2) into a freshly compiled model of the same shape, reads the last
  layer back as a sanity check, then trains for one epoch.
  """
  dense1 = teacher_model.get_layer(0)
  d1_kernel, d1_bias = dense1.get_weights(teacher_model.ffmodel)
  print(d1_kernel.shape, d1_bias.shape)
  # print(d1_kernel)
  # print(d1_bias)
  dense2 = teacher_model.get_layer(1)
  d2_kernel, d2_bias = dense2.get_weights(teacher_model.ffmodel)

  dense3 = teacher_model.get_layer(2)
  d3_kernel, d3_bias = dense3.get_weights(teacher_model.ffmodel)

  model = Sequential()
  model.add(Dense(512, input_shape=(784,), activation="relu"))
  model.add(Dense(512, activation="relu"))
  model.add(Dense(num_classes))
  model.add(Activation("softmax"))

  opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
  model.compile(optimizer=opt)

  # NOTE(review): weights are set only after compile(), presumably because
  # model.ffmodel exists only then — confirm against the FlexFlow API.
  dense1s = model.get_layer(0)
  dense2s = model.get_layer(1)
  dense3s = model.get_layer(2)

  dense1s.set_weights(model.ffmodel, d1_kernel, d1_bias)
  dense2s.set_weights(model.ffmodel, d2_kernel, d2_bias)
  dense3s.set_weights(model.ffmodel, d3_kernel, d3_bias)

  # Read the last layer back to verify the copy took effect.
  d3_kernel, d3_bias = dense3s.get_weights(model.ffmodel)
  print(d3_kernel)
  print(d3_bias)


  model.fit(x_train, y_train, epochs=1)
예제 #14
0
def create_student_model_mlp(teacher_model, num_classes, x_train, y_train):
    """Copy the teacher MLP's Dense weights into an identically shaped
    student, then train the student for 5 epochs with verification
    callbacks.
    """
    teacher_ff = teacher_model.ffmodel
    d1_kernel, d1_bias = teacher_model.get_layer(index=0).get_weights(teacher_ff)
    print(d1_kernel.shape, d1_bias.shape)
    d2_kernel, d2_bias = teacher_model.get_layer(index=1).get_weights(teacher_ff)

    d3_kernel, d3_bias = teacher_model.get_layer(index=2).get_weights(teacher_ff)

    model = Sequential([
        Dense(512, input_shape=(784, ), activation="relu"),
        Dense(512, activation="relu"),
        Dense(num_classes),
        Activation("softmax"),
    ])

    opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy', 'sparse_categorical_crossentropy'])

    # Transfer the teacher weights into the matching student layers.
    weight_pairs = [(d1_kernel, d1_bias),
                    (d2_kernel, d2_bias),
                    (d3_kernel, d3_bias)]
    for index, (kernel, bias) in enumerate(weight_pairs):
        model.get_layer(index=index).set_weights(model.ffmodel, kernel, bias)

    # Read the last layer back as a sanity check on the copy.
    d3_kernel, d3_bias = model.get_layer(index=2).get_weights(model.ffmodel)
    print(d3_kernel)
    print(d3_bias)

    model.fit(x_train,
              y_train,
              epochs=5,
              callbacks=[
                  VerifyMetrics(ModelAccuracy.MNIST_MLP),
                  EpochVerifyMetrics(ModelAccuracy.MNIST_MLP)
              ])
예제 #15
0
def create_teacher_model_mlp(num_classes, x_train, y_train):
    """Train the teacher MLP for one epoch, print its last-layer weights,
    and return the fitted model.
    """
    model = Sequential([
        Dense(512, input_shape=(784, ), activation="relu"),
        Dense(512, activation="relu"),
        Dense(num_classes),
        Activation("softmax"),
    ])

    opt = flexflow.keras.optimizers.SGD(learning_rate=0.01)
    model.compile(optimizer=opt,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy', 'sparse_categorical_crossentropy'])

    model.fit(x_train, y_train, epochs=1)

    # Print the final Dense layer's parameters; the reshape is local to
    # the printout and does not modify the model.
    d3_kernel, d3_bias = model.get_layer(index=2).get_weights(model.ffmodel)
    print(d3_bias)
    d3_kernel = np.reshape(d3_kernel, (d3_kernel.shape[1], d3_kernel.shape[0]))
    print(d3_kernel)
    return model
예제 #16
0
def top_level_task():
    """Train a two-layer MLP on Reuters topic classification with Adam."""
    max_words = 1000
    epochs = 5

    print('Loading data...')
    (x_train, y_train), (x_test,
                         y_test) = reuters.load_data(num_words=max_words,
                                                     test_split=0.2)
    print(len(x_train), 'train sequences')
    print(len(x_test), 'test sequences')

    # Labels are 0-based topic ids, so the class count is max + 1.
    num_classes = np.max(y_train) + 1
    print(num_classes, 'classes')

    print('Vectorizing sequence data...')
    tokenizer = Tokenizer(num_words=max_words)
    x_train = tokenizer.sequences_to_matrix(x_train, mode='binary')
    x_test = tokenizer.sequences_to_matrix(x_test, mode='binary')
    x_train = x_train.astype('float32')
    print('x_train shape:', x_train.shape)
    print('x_test shape:', x_test.shape)

    y_train = y_train.astype('int32')
    y_train = np.reshape(y_train, (len(y_train), 1))
    print('y_train shape:', y_train.shape)

    model = Sequential([
        Dense(512, input_shape=(max_words, ), activation="relu"),
        Dense(num_classes),
        Activation("softmax"),
    ])

    opt = flexflow.keras.optimizers.Adam(learning_rate=0.01)
    model.compile(optimizer=opt)

    model.fit(x_train, y_train, epochs=epochs)