Code Example #1
# Binarized 784-512-512-10 MLP; H, kernel_lr_multiplier, use_bias and
# lr_start are globals defined elsewhere in the source script.
def build_model():
    model = Sequential()
    model.add(
        BinaryDense(512,
                    input_shape=(784, ),
                    H=H,
                    kernel_lr_multiplier=kernel_lr_multiplier,
                    use_bias=use_bias))
    model.add(Activation(binary_tanh))
    model.add(
        BinaryDense(512,
                    H=H,
                    kernel_lr_multiplier=kernel_lr_multiplier,
                    use_bias=use_bias))
    model.add(Activation(binary_tanh))
    model.add(
        BinaryDense(10,
                    H=H,
                    kernel_lr_multiplier=kernel_lr_multiplier,
                    use_bias=use_bias))
    opt = Adam(lr=lr_start)
    model.compile(loss='squared_hinge', optimizer=opt, metrics=['accuracy'])
    return model
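
Every example in this listing passes binary_tanh (or, in Code Example #3, binary_sigmoid) to Activation without showing its definition. The sketch below shows how these activations are typically written in BinaryNet-style Keras code, using a round-through straight-through estimator so the quantization stays trainable; it is an assumption based on common implementations, not code taken from the snippets here.

from keras import backend as K

def round_through(x):
    # Round in the forward pass, but let gradients flow through unchanged
    # (the straight-through estimator).
    rounded = K.round(x)
    return x + K.stop_gradient(rounded - x)

def _hard_sigmoid(x):
    # Piecewise-linear sigmoid; cheap and quantization-friendly.
    return K.clip((x + 1.) / 2., 0., 1.)

def binary_sigmoid(x):
    # Outputs in {0, 1}.
    return round_through(_hard_sigmoid(x))

def binary_tanh(x):
    # Outputs in {-1, 1}, matching the -1/+1 hinge targets used below.
    return 2. * round_through(_hard_sigmoid(x)) - 1.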
Code Example #2
# Binarized MobileNetV2 for 32x32x3 inputs; InvertedResidualBlock is a helper
# defined elsewhere in the source project (see the sketch after this example).
def BinaryMobilenetV2():
  model = Sequential() 
  model.add(BinaryConv2D(32, kernel_size=kernel_size, strides=(2, 2), H=H,
                         kernel_lr_multiplier=kernel_lr_multiplier,
                         data_format='channels_last', padding='same',
                         use_bias=use_bias, input_shape=(32, 32, 3)))
  model.add(BatchNormalization(axis=-1))  # channels_last data: normalize the channel (last) axis
  model.add(Activation(binary_tanh))
  model = InvertedResidualBlock(model, 16, (3, 3), t=1, strides=1, n=1)
  model = InvertedResidualBlock(model, 24, (3, 3), t=6, strides=2, n=2)
  model = InvertedResidualBlock(model, 32, (3, 3), t=6, strides=2, n=3)
  model = InvertedResidualBlock(model, 64, (3, 3), t=6, strides=2, n=4)
  model = InvertedResidualBlock(model, 96, (3, 3), t=6, strides=1, n=3)
  model = InvertedResidualBlock(model, 160, (3, 3), t=6, strides=2, n=3)
  model = InvertedResidualBlock(model, 320, (3, 3), t=6, strides=1, n=1)

  model.add(BinaryConv2D(1280, kernel_size=(1, 1), strides=(1, 1), H=H,
                         kernel_lr_multiplier=kernel_lr_multiplier,
                         use_bias=use_bias))
  model.add(BatchNormalization(axis=-1))
  model.add(Activation(binary_tanh))

  model.add(Flatten()) 
  model.add(BinaryDense(classes, H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias))
  model.add(BatchNormalization(axis=1))

  return model
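
InvertedResidualBlock is not defined in the snippet; only its signature can be read off the call sites above. Below is a sketch of one plausible implementation that stacks MobileNetV2-style expand / depthwise / project stages onto the Sequential model. A Sequential container cannot express the residual skip connection, so the sketch omits it; everything inside the function body is an assumption.

from keras.layers import Activation, BatchNormalization, DepthwiseConv2D

def InvertedResidualBlock(model, filters, kernel, t, strides, n):
    for i in range(n):
        in_channels = model.output_shape[-1]
        s = strides if i == 0 else 1  # stride only on the first of the n stages
        # 1x1 expansion to t * input channels
        model.add(BinaryConv2D(in_channels * t, kernel_size=(1, 1), H=H,
                               kernel_lr_multiplier=kernel_lr_multiplier,
                               padding='same', use_bias=use_bias))
        model.add(BatchNormalization(axis=-1))
        model.add(Activation(binary_tanh))
        # depthwise convolution (full precision here; a binarized depthwise
        # layer would be substituted if the project defines one)
        model.add(DepthwiseConv2D(kernel, strides=s, padding='same',
                                  use_bias=use_bias))
        model.add(BatchNormalization(axis=-1))
        model.add(Activation(binary_tanh))
        # 1x1 projection down to the requested number of output channels
        model.add(BinaryConv2D(filters, kernel_size=(1, 1), H=H,
                               kernel_lr_multiplier=kernel_lr_multiplier,
                               padding='same', use_bias=use_bias))
        model.add(BatchNormalization(axis=-1))
    return model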
Code Example #3
def create_model(X_train, Y_train, X_test, Y_test):
    
    # Hyperparameters
    
    H = 'Glorot'
    kernel_lr_multiplier = 'Glorot'
    use_bias = False
    epsilon = 1e-3
    momentum = 0.9
    epochs = 10
    batch_size = {{choice([512])}}  # hyperas template syntax, resolved by optim.minimize
    
    # Number of units per layer 
    N = 64       
    
    # Building the model
    
    model = Sequential()
    # Input layer
    model.add(BatchNormalization(input_shape=(16,), momentum=momentum, epsilon=epsilon))
    model.add(Activation(binary_sigmoid))
    
    # Hidden Layer 1
    model.add(BinaryDense(N, kernel_regularizer=l2(0.0), H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias))  # kernel_regularizer is the Keras 2 name for W_regularizer
    model.add(BatchNormalization())
    model.add(Activation(binary_sigmoid))
    # Hidden Layer 2
    model.add(BinaryDense(N, kernel_regularizer=l2(0.0), H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias))
    model.add(BatchNormalization())
    model.add(Activation(binary_sigmoid))
    # Hidden Layer 3
    model.add(BinaryDense(N, kernel_regularizer=l2(0.0), H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias))
    model.add(BatchNormalization())
    model.add(Activation(binary_sigmoid))
    # Hidden Layer 4
    model.add(BinaryDense(N, kernel_regularizer=l2(0.0), H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias))
    model.add(BatchNormalization())
    model.add(Activation(binary_sigmoid))
    # Output layer 
    model.add(BinaryDense(1, kernel_regularizer=l2(0.0), H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias))
    model.add(BatchNormalization())
    model.add(Activation(binary_sigmoid))
    
    # Optimiser & cost functions
    opt = Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, amsgrad=False)
    model.compile(loss='mse', optimizer=opt, metrics=['binary_accuracy'])
    
    model.summary()
     
    result = model.fit(X_train, Y_train, batch_size=batch_size, epochs=epochs,
                       verbose=2,
                       validation_data=(X_test, Y_test))
    
    # Plotting training curve
    
    acc = result.history['binary_accuracy']
    val_acc = result.history['val_binary_accuracy']
    loss = result.history['loss']
    val_loss = result.history['val_loss']
    
    epoch_range = range(len(acc))
    
    plt.subplot(2, 1, 1)
    plt.plot(epoch_range, val_acc, 'r', label='Validation acc')
    plt.title('Accuracy & Loss History')
    plt.ylabel('Accuracy')
    plt.legend()
    
    plt.subplot(2, 1, 2)
    plt.plot(epoch_range, val_loss, 'r', label='Validation loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend()
    
    plt.show()
    
    validation_acc = np.amax(result.history['val_binary_accuracy'])
    tr_loss = np.amin(result.history['loss'])  # hyperopt minimises this value, so report the best (lowest) loss
    print('Best validation acc of epoch:', validation_acc)
    
    return {'loss': tr_loss, 'status': STATUS_OK, 'model': model}
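
create_model is written for hyperas, which is why batch_size uses the {{choice([...])}} template and the function returns a dict containing STATUS_OK. A minimal driver sketch follows; the data() loader and the max_evals value are illustrative assumptions, not from the source.

from hyperas import optim
from hyperopt import Trials, tpe

def data():
    # Assumed loader: must return X_train, Y_train, X_test, Y_test.
    ...

best_run, best_model = optim.minimize(model=create_model,
                                      data=data,
                                      algo=tpe.suggest,
                                      max_evals=5,
                                      trials=Trials())
print('Best run:', best_run)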
Code Example #4
# Binarized VGG-16 for 32x32x3 (CIFAR-10-shaped) inputs; kernel_size, H,
# kernel_lr_multiplier, use_bias, epsilon, momentum, pool_size and classes
# are globals defined elsewhere in the source script.
def BinaryVGG16():
  model = Sequential()
  # 64
  model.add(BinaryConv2D(64, kernel_size=kernel_size, input_shape=(32, 32, 3),
                         data_format='channels_last',
                         H=H, kernel_lr_multiplier=kernel_lr_multiplier, 
                         padding='same', use_bias=use_bias, name='conv1'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=-1, name='bn1'))  # channels_last data: normalize the last axis
  model.add(Activation(binary_tanh, name='act1'))
  model.add(BinaryConv2D(64, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier, 
                         data_format='channels_last',
                         padding='same', use_bias=use_bias, name='conv2'))
  model.add(MaxPooling2D(pool_size=pool_size, name='pool2', data_format='channels_last'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=-1, name='bn2'))
  model.add(Activation(binary_tanh, name='act2'))
  # 128
  model.add(BinaryConv2D(128, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                         data_format='channels_last',
                         padding='same', use_bias=use_bias, name='conv3'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=-1, name='bn3'))
  model.add(Activation(binary_tanh, name='act3'))
  model.add(BinaryConv2D(128, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                         data_format='channels_last',
                         padding='same', use_bias=use_bias, name='conv4'))
  model.add(MaxPooling2D(pool_size=pool_size, name='pool4', data_format='channels_last'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=-1, name='bn4'))
  model.add(Activation(binary_tanh, name='act4'))
  # 256
  model.add(BinaryConv2D(256, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                         data_format='channels_last',
                         padding='same', use_bias=use_bias, name='conv5'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=-1, name='bn5'))
  model.add(Activation(binary_tanh, name='act5'))
  model.add(BinaryConv2D(256, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                         data_format='channels_last',
                         padding='same', use_bias=use_bias, name='conv6'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=-1, name='bn6'))
  model.add(Activation(binary_tanh, name='act6'))
  model.add(BinaryConv2D(256, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                         data_format='channels_last',
                         padding='same', use_bias=use_bias, name='conv7'))
  model.add(MaxPooling2D(pool_size=pool_size, name='pool7', data_format='channels_last'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=-1, name='bn7'))
  model.add(Activation(binary_tanh, name='act7'))
  # 512
  model.add(BinaryConv2D(512, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                         data_format='channels_last',
                         padding='same', use_bias=use_bias, name='conv8'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=-1, name='bn8'))
  model.add(Activation(binary_tanh, name='act8'))
  model.add(BinaryConv2D(512, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                         data_format='channels_last',
                         padding='same', use_bias=use_bias, name='conv9'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=-1, name='bn9'))
  model.add(Activation(binary_tanh, name='act9'))
  model.add(BinaryConv2D(512, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                         data_format='channels_last',
                         padding='same', use_bias=use_bias, name='conv10'))
  model.add(MaxPooling2D(pool_size=pool_size, name='pool10', data_format='channels_last'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=-1, name='bn10'))
  model.add(Activation(binary_tanh, name='act10'))
  #512
  model.add(BinaryConv2D(512, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                         data_format='channels_last',
                         padding='same', use_bias=use_bias, name='conv11'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=-1, name='bn11'))
  model.add(Activation(binary_tanh, name='act11'))
  model.add(BinaryConv2D(512, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                         data_format='channels_last',
                         padding='same', use_bias=use_bias, name='conv12'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=-1, name='bn12'))
  model.add(Activation(binary_tanh, name='act12'))
  model.add(BinaryConv2D(512, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                         data_format='channels_last',
                         padding='same', use_bias=use_bias, name='conv13'))
  model.add(MaxPooling2D(pool_size=pool_size, name='pool13', data_format='channels_last'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=-1, name='bn13'))
  model.add(Activation(binary_tanh, name='act13'))
  #final
  model.add(Flatten())
  # dense1
  model.add(BinaryDense(4096, H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias, name='dense1'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn14'))
  model.add(Activation(binary_tanh, name='act14'))
  # dense2
  model.add(BinaryDense(4096, H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias, name='dense2'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn15'))
  model.add(Activation(binary_tanh, name='act15'))
  #dense3
  model.add(BinaryDense(classes, H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias, name='dense3'))
  model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn16'))

  return model
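
BinaryVGG16 takes 32x32x3 inputs, i.e. CIFAR-10 shaped. A minimal training driver under that assumption, reusing the -1/+1 hinge-target encoding shown in Code Example #5; the batch size, epoch count and learning rate are illustrative.

from keras.datasets import cifar10
from keras.optimizers import Adam
from keras.utils import np_utils

(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
# -1/+1 targets for the squared hinge loss, as in Code Example #5
Y_train = np_utils.to_categorical(y_train, 10) * 2 - 1
Y_test = np_utils.to_categorical(y_test, 10) * 2 - 1

model = BinaryVGG16()
model.compile(loss='squared_hinge', optimizer=Adam(lr=1e-3), metrics=['acc'])
model.fit(X_train, Y_train, batch_size=128, epochs=20,
          validation_data=(X_test, Y_test))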
Code Example #5
# (snippet begins mid-script; X_train is presumably scaled the same way just above)
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train,
                                  nb_classes) * 2 - 1  # -1 or 1 for hinge loss
Y_test = np_utils.to_categorical(y_test, nb_classes) * 2 - 1

model = Sequential()
model.add(DropoutNoScale(drop_in, input_shape=(784, ), name='drop0'))
for i in range(num_hidden):
    model.add(
        BinaryDense(num_unit,
                    H=H,
                    kernel_lr_multiplier=kernel_lr_multiplier,
                    use_bias=use_bias,
                    name='dense{}'.format(i + 1)))
    model.add(
        BatchNormalization(epsilon=epsilon,
                           momentum=momentum,
                           name='bn{}'.format(i + 1)))
    model.add(Activation(binary_tanh, name='act{}'.format(i + 1)))
    model.add(DropoutNoScale(drop_hidden, name='drop{}'.format(i + 1)))
model.add(
    BinaryDense(10,
                H=H,
                kernel_lr_multiplier=kernel_lr_multiplier,
                use_bias=use_bias,
                name='dense'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn'))
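
DropoutNoScale is a project-specific layer that is not shown in the snippet. The sketch below reflects how it is commonly defined in BinaryNet Keras ports: ordinary Dropout with the usual 1/(1 - rate) training-time rescaling cancelled out, so that -1/+1 activations stay exactly -1/+1 (or 0). This is an assumption based on typical implementations, not code from this snippet.

from keras import backend as K
from keras.layers import Dropout

class DropoutNoScale(Dropout):
    def call(self, inputs, training=None):
        if 0. < self.rate < 1.:
            noise_shape = self._get_noise_shape(inputs)

            def dropped_inputs():
                # K.dropout scales kept units by 1/(1 - rate); multiplying by
                # (1 - rate) undoes that scaling.
                return K.dropout(inputs, self.rate, noise_shape,
                                 seed=self.seed) * (1 - self.rate)

            return K.in_train_phase(dropped_inputs, inputs, training=training)
        return inputs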
Code Example #6
File: mnist_cnn.py  Project: ml-lab/nn_playground
# conv4 (the snippet begins mid-call: the opening of this layer was cut off.
# It is reconstructed here by analogy with the conv4 layers in the other
# examples; the 256 filter count and the BinaryConvolution2D name are
# assumptions based on the Keras 1 style of the rest of the snippet.)
model.add(
    BinaryConvolution2D(256,
                        3,
                        3,
                        H=H,
                        W_lr_multiplier=W_lr_multiplier,
                        border_mode='same',
                        bias=bias,
                        name='conv4'))
model.add(MaxPooling2D(pool_size=(2, 2), name='pool4'))
model.add(
    BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn4'))
model.add(Activation(binary_tanh, name='act4'))
model.add(Flatten())
# dense1
model.add(
    BinaryDense(1024,
                H=H,
                W_lr_multiplier=W_lr_multiplier,
                bias=bias,
                name='dense5'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn5'))
model.add(Activation(binary_tanh, name='act5'))
# dense2
model.add(
    BinaryDense(nb_classes,
                H=H,
                W_lr_multiplier=W_lr_multiplier,
                bias=bias,
                name='dense6'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn6'))

opt = Adam(lr=lr_start)
model.compile(loss='squared_hinge', optimizer=opt, metrics=['acc'])
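
This example uses the Keras 1 argument names (W_lr_multiplier, bias, border_mode) while most of the other examples use the Keras 2 equivalents. For reference, the dense5 layer above rewritten with the Keras 2 names used elsewhere in this listing:

model.add(
    BinaryDense(1024,
                H=H,
                kernel_lr_multiplier=W_lr_multiplier,  # Keras 1 name: W_lr_multiplier
                use_bias=bias,                         # Keras 1 name: bias
                name='dense5'))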
Code Example #7
# conv3
model.add(BinaryConv2D(256, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                       data_format='channels_first',
                       padding='same', use_bias=use_bias, name='conv3'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn3'))
model.add(Activation(binary_tanh, name='act3'))
# conv4
model.add(BinaryConv2D(256, kernel_size=kernel_size, H=H, kernel_lr_multiplier=kernel_lr_multiplier,
                       data_format='channels_first',
                       padding='same', use_bias=use_bias, name='conv4'))
model.add(MaxPooling2D(pool_size=pool_size, name='pool4', data_format='channels_first'))  # data_format must match the convolutions
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn4'))
model.add(Activation(binary_tanh, name='act4'))
model.add(Flatten())
# dense1
model.add(BinaryDense(1024, H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias, name='dense5'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn5'))
model.add(Activation(binary_tanh, name='act5'))
# dense2
model.add(BinaryDense(classes, H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias, name='dense6'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn6'))

opt = Adam(lr=lr_start) 
model.compile(loss='squared_hinge', optimizer=opt, metrics=['acc'])
model.summary()

lr_scheduler = LearningRateScheduler(lambda e: lr_start * lr_decay ** e)
history = model.fit(X_train, Y_train,
                    batch_size=batch_size, epochs=epochs,
                    verbose=1, validation_data=(X_test, Y_test),
                    callbacks=[lr_scheduler])
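
The LearningRateScheduler above applies a per-epoch geometric decay. With the constants defined in Code Example #8 (lr_start = 1e-3, lr_end = 1e-4), the rate falls smoothly from lr_start to lr_end over the run; a quick illustration with an assumed epoch count of 20:

lr_start, lr_end, epochs = 1e-3, 1e-4, 20
lr_decay = (lr_end / lr_start) ** (1. / epochs)  # per-epoch decay factor

for e in (0, epochs // 2, epochs - 1):
    print('epoch %2d: lr = %.2e' % (e, lr_start * lr_decay ** e))
# epoch  0: lr = 1.00e-03
# epoch 10: lr = 3.16e-04
# epoch 19: lr = 1.12e-04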
Code Example #8
lr_start = 1e-3
lr_end = 1e-4
lr_decay = (lr_end / lr_start)**(1. / epochs)

# BN
epsilon = 1e-6
momentum = 0.9

# dropout
drop_in = 0.2
drop_hidden = 0.5

# Step 2: Build the Model

model = Sequential()
model.add(BinaryDense(512, input_shape=(784,), H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias))
model.add(Activation(binary_tanh))
model.add(BinaryDense(512, H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias))
model.add(Activation(binary_tanh))
model.add(BinaryDense(10, H=H, kernel_lr_multiplier=kernel_lr_multiplier, use_bias=use_bias))

# Printing the model
print(model.summary())
for layer in model.layers:
    print(layer.name, layer.get_weights())

# Step 3: Compile the Model
opt = Adam(lr=lr_start)
model.compile(loss='squared_hinge', optimizer=opt, metrics=['acc'])

Code Example #9
def build_model(kernel_size, channels, img_rows, img_cols, H,
                kernel_lr_multiplier, use_bias, epsilon, momentum):
    # pool_size, classes and lr_start are still read from globals here.
    model = Sequential()
    # conv1
    model.add(
        BinaryConv2D(128,
                     kernel_size=kernel_size,
                     input_shape=(channels, img_rows, img_cols),
                     data_format='channels_first',
                     H=H,
                     kernel_lr_multiplier=kernel_lr_multiplier,
                     padding='same',
                     use_bias=use_bias,
                     name='conv1'))
    model.add(
        MaxPooling2D(pool_size=pool_size,
                     name='pool1',
                     data_format='channels_first'))
    model.add(
        BatchNormalization(epsilon=epsilon,
                           momentum=momentum,
                           axis=1,
                           name='bn1'))
    model.add(Activation(binary_tanh, name='act1'))
    # conv2
    model.add(
        BinaryConv2D(128,
                     kernel_size=kernel_size,
                     H=H,
                     kernel_lr_multiplier=kernel_lr_multiplier,
                     data_format='channels_first',
                     padding='same',
                     use_bias=use_bias,
                     name='conv2'))
    model.add(
        MaxPooling2D(pool_size=pool_size,
                     name='pool2',
                     data_format='channels_first'))
    model.add(
        BatchNormalization(epsilon=epsilon,
                           momentum=momentum,
                           axis=1,
                           name='bn2'))
    model.add(Activation(binary_tanh, name='act2'))
    # conv3
    model.add(
        BinaryConv2D(128,
                     kernel_size=kernel_size,
                     H=H,
                     kernel_lr_multiplier=kernel_lr_multiplier,
                     data_format='channels_first',
                     padding='same',
                     use_bias=use_bias,
                     name='conv3'))
    model.add(
        MaxPooling2D(pool_size=pool_size,
                     name='pool3',
                     data_format='channels_first'))
    model.add(
        BatchNormalization(epsilon=epsilon,
                           momentum=momentum,
                           axis=1,
                           name='bn3'))
    model.add(Activation(binary_tanh, name='act3'))
    # conv4
    model.add(
        BinaryConv2D(256,
                     kernel_size=kernel_size,
                     H=H,
                     kernel_lr_multiplier=kernel_lr_multiplier,
                     data_format='channels_first',
                     padding='same',
                     use_bias=use_bias,
                     name='conv4'))
    model.add(
        MaxPooling2D(pool_size=pool_size,
                     name='pool4',
                     data_format='channels_first'))
    model.add(
        BatchNormalization(epsilon=epsilon,
                           momentum=momentum,
                           axis=1,
                           name='bn4'))
    model.add(Activation(binary_tanh, name='act4'))
    model.add(Flatten())
    # dense1
    model.add(
        BinaryDense(256,
                    H=H,
                    kernel_lr_multiplier=kernel_lr_multiplier,
                    use_bias=use_bias,
                    name='dense5'))
    model.add(
        BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn5'))
    model.add(Activation(binary_tanh, name='act5'))
    # dense2
    model.add(
        BinaryDense(classes,
                    H=H,
                    kernel_lr_multiplier=kernel_lr_multiplier,
                    use_bias=use_bias,
                    name='dense6'))
    model.add(
        BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn6'))

    opt = Adam(lr=lr_start)
    model.compile(loss='squared_hinge', optimizer=opt, metrics=['acc'])
    model.summary()
    return model
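
A hypothetical invocation of build_model, assuming MNIST-shaped channels-first input; pool_size, classes and lr_start must exist as globals, and every value below is illustrative rather than taken from the source.

pool_size = (2, 2)
classes = 10
lr_start = 1e-3

model = build_model(kernel_size=(3, 3), channels=1, img_rows=28, img_cols=28,
                    H='Glorot', kernel_lr_multiplier='Glorot', use_bias=False,
                    epsilon=1e-6, momentum=0.9)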
Code Example #10
File: mnist_mlp.py  Project: xuqy1981/-
# (snippet begins mid-script, as in Code Example #5)
X_test /= 255
print(X_train.shape[0], 'train samples')
print(X_test.shape[0], 'test samples')

# convert class vectors to binary class matrices
Y_train = np_utils.to_categorical(y_train,
                                  nb_classes) * 2 - 1  # -1 or 1 for hinge loss
Y_test = np_utils.to_categorical(y_test, nb_classes) * 2 - 1

model = Sequential()
model.add(DropoutNoScale(drop_in, input_shape=(784, ), name='drop0'))
for i in range(num_hidden):
    model.add(
        BinaryDense(num_unit,
                    H=H,
                    W_lr_multiplier=W_lr_multiplier,
                    bias=bias,
                    name='dense{}'.format(i + 1)))
    model.add(
        BatchNormalization(epsilon=epsilon,
                           momentum=momentum,
                           name='bn{}'.format(i + 1)))
    model.add(Activation(binary_tanh, name='act{}'.format(i + 1)))
    model.add(DropoutNoScale(drop_hidden, name='drop{}'.format(i + 1)))
model.add(
    BinaryDense(10,
                H=H,
                W_lr_multiplier=W_lr_multiplier,
                bias=bias,
                name='dense'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn'))

Code Example #11
# conv4 (the snippet begins mid-call: the opening of this layer was cut off.
# It is reconstructed here by analogy with the conv4 layers in the other
# examples; the 256 filter count is an assumption.)
model.add(
    BinaryConv2D(256,
                 kernel_size=kernel_size,
                 H=H,
                 kernel_lr_multiplier=kernel_lr_multiplier,
                 data_format='channels_first',
                 padding='same',
                 use_bias=use_bias,
                 name='conv4'))
model.add(
    MaxPooling2D(pool_size=pool_size,
                 name='pool4',
                 data_format='channels_first'))
model.add(
    BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn4'))
model.add(Activation(binary_tanh, name='act4'))
model.add(Flatten())
# dense1
model.add(
    BinaryDense(128,
                H=H,
                kernel_lr_multiplier=kernel_lr_multiplier,
                use_bias=use_bias,
                name='dense5'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn5'))
model.add(Activation(binary_tanh, name='act5'))
# dense2
model.add(
    BinaryDense(classes,
                H=H,
                kernel_lr_multiplier=kernel_lr_multiplier,
                use_bias=use_bias,
                name='dense6'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn6'))

opt = Adam(lr=lr_start)
model.compile(loss='squared_hinge', optimizer=opt, metrics=['acc'])
Code Example #12
model.add(
    MaxPooling2D(pool_size=pool_size,
                 name='pool6',
                 data_format='channels_first'))
model.add(
    BatchNormalization(epsilon=epsilon, momentum=momentum, axis=1, name='bn6'))
model.add(Activation(binary_tanh, name='act6'))

model.add(Flatten())
# dense1
model.add(
    BinaryDense(1024,
                H=H,
                kernel_lr_multiplier=kernel_lr_multiplier,
                use_bias=use_bias,
                name='dense7'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn7'))
model.add(Activation(binary_tanh, name='act7'))
# dense2
model.add(
    BinaryDense(1024,
                H=H,
                kernel_lr_multiplier=kernel_lr_multiplier,
                use_bias=use_bias,
                name='dense8'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn8'))
model.add(Activation(binary_tanh, name='act8'))
# dense3 (the snippet is cut off here; completed by analogy with the final
# dense blocks in the other examples. The layer names and the use of
# `classes` are assumptions.)
model.add(
    BinaryDense(classes,
                H=H,
                kernel_lr_multiplier=kernel_lr_multiplier,
                use_bias=use_bias,
                name='dense9'))
model.add(BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn9'))
Code Example #13
File: model.py  Project: lyc1599214803/BinaryNet
def Binary_Net(kernel_size, img_rows, img_cols, channels, data_format, H,
               kernel_lr_multiplier, use_bias, epsilon, momentum, classes,
               pool_size):
    model = Sequential()
    # conv1
    model.add(
        BinaryConv2D(128,
                     kernel_size=kernel_size,
                     input_shape=(img_rows, img_cols, channels),
                     data_format=data_format,
                     H=H,
                     kernel_lr_multiplier=kernel_lr_multiplier,
                     padding='same',
                     use_bias=use_bias,
                     name='conv1'))
    model.add(
        BatchNormalization(epsilon=epsilon,
                           momentum=momentum,
                           axis=-1,
                           name='bn1'))
    model.add(Activation(binary_tanh, name='act1'))
    # conv2
    model.add(
        BinaryConv2D(128,
                     kernel_size=kernel_size,
                     H=H,
                     kernel_lr_multiplier=kernel_lr_multiplier,
                     data_format=data_format,
                     padding='same',
                     use_bias=use_bias,
                     name='conv2'))
    model.add(
        MaxPooling2D(pool_size=pool_size,
                     name='pool2',
                     data_format=data_format))
    model.add(
        BatchNormalization(epsilon=epsilon,
                           momentum=momentum,
                           axis=-1,
                           name='bn2'))
    model.add(Activation(binary_tanh, name='act2'))
    # conv3
    model.add(
        BinaryConv2D(256,
                     kernel_size=kernel_size,
                     H=H,
                     kernel_lr_multiplier=kernel_lr_multiplier,
                     data_format=data_format,
                     padding='same',
                     use_bias=use_bias,
                     name='conv3'))
    model.add(
        BatchNormalization(epsilon=epsilon,
                           momentum=momentum,
                           axis=-1,
                           name='bn3'))
    model.add(Activation(binary_tanh, name='act3'))
    # conv4
    model.add(
        BinaryConv2D(256,
                     kernel_size=kernel_size,
                     H=H,
                     kernel_lr_multiplier=kernel_lr_multiplier,
                     data_format=data_format,
                     padding='same',
                     use_bias=use_bias,
                     name='conv4'))
    model.add(
        MaxPooling2D(pool_size=pool_size,
                     name='pool4',
                     data_format=data_format))
    model.add(
        BatchNormalization(epsilon=epsilon,
                           momentum=momentum,
                           axis=-1,
                           name='bn4'))
    model.add(Activation(binary_tanh, name='act4'))
    model.add(Flatten())
    # dense1
    model.add(
        BinaryDense(1024,
                    H=H,
                    kernel_lr_multiplier=kernel_lr_multiplier,
                    use_bias=use_bias,
                    name='dense5'))
    model.add(
        BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn5'))
    model.add(Activation(binary_tanh, name='act5'))
    # dense2
    model.add(
        BinaryDense(classes,
                    H=H,
                    kernel_lr_multiplier=kernel_lr_multiplier,
                    use_bias=use_bias,
                    name='dense6'))
    model.add(
        BatchNormalization(epsilon=epsilon, momentum=momentum, name='bn6'))
    return model
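
A hypothetical invocation of Binary_Net for CIFAR-10-shaped channels_last input, compiled in the style of the other examples; every value below is illustrative rather than taken from the source.

model = Binary_Net(kernel_size=(3, 3), img_rows=32, img_cols=32, channels=3,
                   data_format='channels_last', H='Glorot',
                   kernel_lr_multiplier='Glorot', use_bias=False,
                   epsilon=1e-6, momentum=0.9, classes=10, pool_size=(2, 2))
model.compile(loss='squared_hinge', optimizer=Adam(lr=1e-3), metrics=['acc'])
model.summary()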