import tensorflow as tf

import layers


def binary_cifar10_sbn_small(input, training=True):
    # Reduced-width variant (4 filters per convolution, 4 hidden units) of
    # binary_cifar10_sbn defined further below, kept under a distinct name so
    # the two definitions do not shadow each other.
    # The first convolution is fed full-precision data (binarize_input=False),
    # because the RGB input image is not binarized.
    out = layers.binaryConv2d(input, 4, [3, 3], [1, 1], padding='VALID', use_bias=True,
                              binarize_input=False, name='bc_conv2d_1_layer1')
    out = layers.spatial_shift_batch_norm(out, training=training, name='batchNormalization1_layer1')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 4, [3, 3], [1, 1], padding='SAME', use_bias=True,
                              name='bnn_conv2d_1_layer2')
    out = tf.layers.max_pooling2d(out, [2, 2], [2, 2])
    out = layers.spatial_shift_batch_norm(out, training=training, name='batchNormalization2_layer2')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 4, [3, 3], [1, 1], padding='SAME', use_bias=True,
                              name='bnn_conv2d_2_layer3')
    out = layers.spatial_shift_batch_norm(out, training=training, name='batchNormalization3_layer3')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 4, [3, 3], [1, 1], padding='SAME', use_bias=True,
                              name='bnn_conv2d_3_layer4')
    out = tf.layers.max_pooling2d(out, [2, 2], [2, 2])
    out = layers.spatial_shift_batch_norm(out, training=training, name='batchNormalization4_layer4')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 4, [3, 3], [1, 1], padding='SAME', use_bias=True,
                              name='bnn_conv2d_4_layer5')
    out = layers.spatial_shift_batch_norm(out, training=training, name='batchNormalization5_layer5')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 4, [3, 3], [1, 1], padding='SAME', use_bias=True,
                              name='bnn_conv2d_5_layer6')
    out = tf.layers.max_pooling2d(out, [2, 2], [2, 2])
    out = layers.spatial_shift_batch_norm(out, training=training, name='batchNormalization6_layer6')
    out = tf.clip_by_value(out, -1, 1)
    # layers.binaryDense is assumed to flatten a 4-D input internally;
    # otherwise insert out = tf.layers.flatten(out) here.
    out = layers.binaryDense(out, 4, use_bias=True, name='binary_dense_1_layer7')
    out = layers.shift_batch_norm(out, training=training, name='batchNormalization7_layer7')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryDense(out, 4, use_bias=True, name='binary_dense_2_layer8')
    out = layers.shift_batch_norm(out, training=training, name='batchNormalization8_layer8')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryDense(out, 10, name='binary_dense_3_layer9')
    output = layers.shift_batch_norm(out, training=training, name='batchNormalization9_layer9')
    return input, output
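# The layers module providing spatial_shift_batch_norm / shift_batch_norm is not
# shown in this file. As a point of reference only, the sketch below illustrates
# the shift-based batch-normalisation idea from the BinaryNet paper (replace
# multiplications with power-of-two scalings). Every name and detail in it is an
# assumption rather than the actual implementation, and it computes batch
# statistics only (no moving averages, no inference path).


def _ap2(x):
    # Power-of-two approximation: sign(x) * 2^round(log2 |x|).
    return tf.sign(x) * tf.pow(2.0, tf.round(tf.log(tf.abs(x) + 1e-8) / tf.log(2.0)))


def _shift_batch_norm_sketch(x, gamma, beta, eps=1e-4):
    # Centre the activations, then approximate the variance and the scaling
    # factors with power-of-two values so the normalisation could in principle
    # be done with shifts instead of multiplications.
    mu = tf.reduce_mean(x, axis=0)
    centred = x - mu
    var = tf.reduce_mean(centred * _ap2(centred), axis=0)
    x_hat = centred * _ap2(tf.rsqrt(var + eps))
    return _ap2(gamma) * x_hat + beta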
def binary_cifar10_small(input, training=True):
    # Reduced-width counterpart (4 filters per convolution, 4 hidden units) of
    # binary_cifar10 defined below, kept under a distinct name so the two
    # definitions do not shadow each other.
    # Only the first convolution receives full-precision data
    # (binarize_input=False), since the RGB input image is not binarized.
    out = layers.binaryConv2d(input, 4, [3, 3], [1, 1], padding='VALID', use_bias=True,
                              binarize_input=False, name='bc_conv2d_1_layer1')
    out = tf.layers.batch_normalization(out, training=training, name='batchNormalization1_layer1')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 4, [3, 3], [1, 1], padding='SAME', use_bias=True,
                              name='bnn_conv2d_1_layer2')
    out = tf.layers.max_pooling2d(out, [2, 2], [2, 2])
    out = tf.layers.batch_normalization(out, training=training, name='batchNormalization2_layer2')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 4, [3, 3], [1, 1], padding='SAME', use_bias=True,
                              name='bnn_conv2d_2_layer3')
    out = tf.layers.batch_normalization(out, training=training, name='batchNormalization3_layer3')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 4, [3, 3], [1, 1], padding='SAME', use_bias=True,
                              name='bnn_conv2d_3_layer4')
    out = tf.layers.max_pooling2d(out, [2, 2], [2, 2])
    out = tf.layers.batch_normalization(out, training=training, name='batchNormalization4_layer4')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 4, [3, 3], [1, 1], padding='SAME', use_bias=True,
                              name='bnn_conv2d_4_layer5')
    out = tf.layers.batch_normalization(out, training=training, name='batchNormalization5_layer5')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 4, [3, 3], [1, 1], padding='SAME', use_bias=True,
                              name='bnn_conv2d_5_layer6')
    out = tf.layers.max_pooling2d(out, [2, 2], [2, 2])
    out = tf.layers.batch_normalization(out, training=training, name='batchNormalization6_layer6')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryDense(out, 4, use_bias=True, name='binary_dense_1_layer7')
    out = tf.layers.batch_normalization(out, training=training, name='batchNormalization7_layer7')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryDense(out, 4, use_bias=True, name='binary_dense_2_layer8')
    out = tf.layers.batch_normalization(out, training=training, name='batchNormalization8_layer8')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryDense(out, 10, name='binary_dense_3_layer9')
    output = tf.layers.batch_normalization(out, training=training, name='batchNormalization9_layer9')
    return input, output
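# layers.binaryConv2d / layers.binaryDense are likewise not shown here. The
# helper below is only a sketch of the usual sign-binarisation with a
# straight-through estimator that such layers are typically built on; its name
# and details are assumptions about, not a copy of, the actual layers module.


def _binarize_ste_sketch(x):
    clipped = tf.clip_by_value(x, -1.0, 1.0)
    # Forward value is sign(x); the stop_gradient trick makes the backward pass
    # see only the hard-tanh `clipped`, i.e. gradient 1 inside [-1, 1] and 0
    # outside. Note that tf.sign(0) == 0.
    return clipped + tf.stop_gradient(tf.sign(x) - clipped)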
def binary_cifar10(input, training=True):
    # Full-width BinaryNet-style CIFAR-10 model: 128-128-256-256-512-512 binary
    # convolutions followed by 1024-1024-10 binary dense layers, with standard
    # batch normalisation and hard clipping to [-1, 1] between layers.
    out = layers.binaryConv2d(input, 128, [3, 3], [1, 1], padding='VALID',
                              binarize_input=False, name='bc_conv2d_1')
    out = tf.layers.batch_normalization(out, training=training)
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 128, [3, 3], [1, 1], padding='SAME', name='bnn_conv2d_1')
    out = tf.layers.max_pooling2d(out, [2, 2], [2, 2])
    out = tf.layers.batch_normalization(out, training=training)
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 256, [3, 3], [1, 1], padding='SAME', name='bnn_conv2d_2')
    out = tf.layers.batch_normalization(out, training=training)
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 256, [3, 3], [1, 1], padding='SAME', name='bnn_conv2d_3')
    out = tf.layers.max_pooling2d(out, [2, 2], [2, 2])
    out = tf.layers.batch_normalization(out, training=training)
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 512, [3, 3], [1, 1], padding='SAME', name='bnn_conv2d_4')
    out = tf.layers.batch_normalization(out, training=training)
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 512, [3, 3], [1, 1], padding='SAME', name='bnn_conv2d_5')
    out = tf.layers.max_pooling2d(out, [2, 2], [2, 2])
    out = tf.layers.batch_normalization(out, training=training)
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryDense(out, 1024, activation=None, name='binary_dense_1')
    out = tf.layers.batch_normalization(out, training=training)
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryDense(out, 1024, activation=None, name='binary_dense_2')
    out = tf.layers.batch_normalization(out, training=training)
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryDense(out, 10, activation=None, name='binary_dense_3')
    output = tf.layers.batch_normalization(out, training=training)
    return input, output
def binary_cifar10_sbn(input, training=True):
    # Same architecture as binary_cifar10 above, but with shift-based batch
    # normalisation: the spatial variant after convolutions and the plain
    # variant after the dense layers.
    out = layers.binaryConv2d(input, 128, [3, 3], [1, 1], padding='VALID',
                              binarize_input=False, name='bc_conv2d_1')
    out = layers.spatial_shift_batch_norm(out, training=training, name='shift_batch_norm_1')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 128, [3, 3], [1, 1], padding='SAME', name='bnn_conv2d_1')
    out = tf.layers.max_pooling2d(out, [2, 2], [2, 2])
    out = layers.spatial_shift_batch_norm(out, training=training, name='shift_batch_norm_2')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 256, [3, 3], [1, 1], padding='SAME', name='bnn_conv2d_2')
    out = layers.spatial_shift_batch_norm(out, training=training, name='shift_batch_norm_3')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 256, [3, 3], [1, 1], padding='SAME', name='bnn_conv2d_3')
    out = tf.layers.max_pooling2d(out, [2, 2], [2, 2])
    out = layers.spatial_shift_batch_norm(out, training=training, name='shift_batch_norm_4')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 512, [3, 3], [1, 1], padding='SAME', name='bnn_conv2d_4')
    out = layers.spatial_shift_batch_norm(out, training=training, name='shift_batch_norm_5')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryConv2d(out, 512, [3, 3], [1, 1], padding='SAME', name='bnn_conv2d_5')
    out = tf.layers.max_pooling2d(out, [2, 2], [2, 2])
    out = layers.spatial_shift_batch_norm(out, training=training, name='shift_batch_norm_6')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryDense(out, 1024, name='binary_dense_1')
    out = layers.shift_batch_norm(out, training=training, name='shift_batch_norm_7')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryDense(out, 1024, name='binary_dense_2')
    out = layers.shift_batch_norm(out, training=training, name='shift_batch_norm_8')
    out = tf.clip_by_value(out, -1, 1)
    out = layers.binaryDense(out, 10, name='binary_dense_3')
    output = layers.shift_batch_norm(out, training=training, name='shift_batch_norm_9')
    return input, output
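# Minimal training-graph sketch (TensorFlow 1.x) for the full-width model above.
# The placeholder shapes, the optimiser and all variable names below are
# illustrative assumptions, not part of the original code.

if __name__ == '__main__':
    images = tf.placeholder(tf.float32, [None, 32, 32, 3], name='images')
    labels = tf.placeholder(tf.int64, [None], name='labels')

    _, logits = binary_cifar10(images, training=True)
    loss = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits))

    # tf.layers.batch_normalization registers its moving-average updates in
    # UPDATE_OPS, so they must be run together with the training step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)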