import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import (
    Input, Conv2d, MaxPool2d, LocalResponseNorm, Flatten, Dense, Sign,
    BinaryConv2d, BinaryDense, TernaryConv2d, TernaryDense,
    DorefaConv2d, DorefaDense
)
from tensorlayer.models import Model


def model(input_shape, n_classes):
    # Ternary-weight CNN: the second conv layer and the two hidden dense
    # layers use ternary weights; the first conv and the output stay full precision.
    in_net = Input(shape=input_shape, name='input')
    net = Conv2d(64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn1')(in_net)
    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm1')(net)
    net = TernaryConv2d(64, (5, 5), (1, 1), act=tf.nn.relu, padding='SAME', name='cnn2')(net)
    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm2')(net)
    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
    net = Flatten(name='flatten')(net)
    net = TernaryDense(384, act=tf.nn.relu, name='d1relu')(net)
    net = TernaryDense(192, act=tf.nn.relu, name='d2relu')(net)
    net = Dense(n_classes, act=None, name='output')(net)
    net = Model(inputs=in_net, outputs=net, name='ternarynet')  # renamed: this is the ternary net, not DoReFa
    return net
def get_model(inputs_shape):
    # Full-precision baseline CNN.
    # self-defined initialization
    W_init = tl.initializers.truncated_normal(stddev=5e-2)
    W_init2 = tl.initializers.truncated_normal(stddev=0.04)
    b_init2 = tl.initializers.constant(value=0.1)
    # build network
    ni = Input(inputs_shape)
    nn = Conv2d(64, (5, 5), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv1')(ni)
    nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(nn)
    nn = LocalResponseNorm(depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name="norm1")(nn)
    nn = Conv2d(64, (5, 5), (1, 1), padding='SAME', act=tf.nn.relu, W_init=W_init, b_init=None, name='conv2')(nn)
    nn = LocalResponseNorm(depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name="norm2")(nn)
    nn = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(nn)
    nn = Flatten(name='flatten')(nn)
    nn = Dense(384, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='dense1relu')(nn)
    nn = Dense(192, act=tf.nn.relu, W_init=W_init2, b_init=b_init2, name='dense2relu')(nn)
    nn = Dense(10, act=None, W_init=W_init2, name='output')(nn)
    M = Model(inputs=ni, outputs=nn, name='cnn')
    return M
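# A minimal training-loop sketch for any of the models in this listing, assuming
# the TensorLayer 2.x eager API (Model.train(), Model.trainable_weights) and a
# hypothetical batch generator `train_batches` yielding (images, labels); it is
# illustrative only, not the tutorials' full training script.
def train_sketch(net, train_batches, n_epoch=1, learning_rate=1e-4):
    optimizer = tf.optimizers.Adam(learning_rate)
    train_weights = net.trainable_weights
    for epoch in range(n_epoch):
        for X_batch, y_batch in train_batches:
            net.train()  # switch the model to training mode before the forward pass
            with tf.GradientTape() as tape:
                _logits = net(X_batch)
                _loss = tl.cost.cross_entropy(_logits, y_batch, name='train_loss')
            grad = tape.gradient(_loss, train_weights)
            optimizer.apply_gradients(zip(grad, train_weights))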
def dorefanet_model(input_shape, n_classes):
    # DoReFa-Net style CNN: the first two positional arguments of DorefaConv2d /
    # DorefaDense are the weight and activation bit widths (bitW, bitA),
    # followed by the number of filters / units.
    in_net = Input(shape=input_shape, name='input')
    net = Conv2d(32, (5, 5), (1, 1), act='relu', padding='SAME', name='conv1')(in_net)
    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm1')(net)
    net = Sign(name='sign')(net)
    net = DorefaConv2d(8, 32, 64, (5, 5), (1, 1), act='relu', padding='SAME', name='DorefaConv1')(net)
    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm2')(net)
    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
    net = Flatten(name='flatten')(net)
    net = DorefaDense(8, 16, 384, act='relu', name='DorefaDense1')(net)
    net = DorefaDense(8, 16, 192, act='relu', name='DorefaDense2')(net)
    net = Dense(n_classes, act=None, name='output')(net)
    net = Model(inputs=in_net, outputs=net, name='dorefanet')
    return net
def binary_model(input_shape, n_classes):
    # BinaryNet-style CNN: Sign layers binarize the activations feeding the
    # binary (1-bit weight) convolution and dense layers.
    in_net = Input(shape=input_shape, name='input')
    net = Conv2d(64, (5, 5), (1, 1), act='relu', padding='SAME', name='conv1')(in_net)
    net = Sign(name='sign1')(net)
    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm1')(net)
    net = BinaryConv2d(64, (5, 5), (1, 1), act='relu', padding='SAME', name='bconv1')(net)
    net = LocalResponseNorm(4, 1.0, 0.001 / 9.0, 0.75, name='norm2')(net)
    net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
    net = Flatten(name='flatten')(net)
    net = Sign(name='sign2')(net)
    net = BinaryDense(384, act='relu', name='d1relu')(net)
    net = Sign(name='sign3')(net)
    net = BinaryDense(192, act='relu', name='d2relu')(net)
    net = Dense(n_classes, act=None, name='output')(net)
    net = Model(inputs=in_net, outputs=net, name='binarynet')
    return net
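# A minimal instantiation / inference sketch, assuming CIFAR-10-shaped inputs
# ([batch, 24, 24, 3]) and 10 classes; the shapes and dummy data are
# illustrative only.
if __name__ == '__main__':
    net = binary_model([None, 24, 24, 3], n_classes=10)
    net.eval()  # inference mode; a TensorLayer Model must be set to train() or eval() before the forward pass
    dummy_images = tf.zeros([8, 24, 24, 3], dtype=tf.float32)
    logits = net(dummy_images)
    predictions = tf.argmax(logits, axis=1)
    print(predictions.shape)  # (8,)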