import tensorflow as tf
# tu refers to a small tensor-utils helper module that is not part of this
# listing; a sketch of the wrappers it is expected to provide follows the
# function below.


def cnn(x):
    with tf.name_scope('cnn') as scope:
        with tf.name_scope('cnn_conv1') as inner_scope:
            wcnn1 = tu.weight([11, 11, 3, 96], name='wcnn1')
            bcnn1 = tu.bias(0.0, [96], name='bcnn1')
            conv1 = tf.add(tu.conv2d(x, wcnn1, stride=(4, 4), padding='SAME'), bcnn1)
            conv1 = tu.relu(conv1)
            norm1 = tu.lrn(conv1, depth_radius=2, bias=1.0, alpha=2e-05, beta=0.75)
            pool1 = tu.max_pool2d(norm1, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1], padding='VALID')

        with tf.name_scope('cnn_conv2') as inner_scope:
            wcnn2 = tu.weight([5, 5, 96, 256], name='wcnn2')
            bcnn2 = tu.bias(1.0, [256], name='bcnn2')
            conv2 = tf.add(tu.conv2d(pool1, wcnn2, stride=(1, 1), padding='SAME'), bcnn2)
            conv2 = tu.relu(conv2)
            norm2 = tu.lrn(conv2, depth_radius=2, bias=1.0, alpha=2e-05, beta=0.75)
            pool2 = tu.max_pool2d(norm2, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1], padding='VALID')

        with tf.name_scope('cnn_conv3') as inner_scope:
            wcnn3 = tu.weight([3, 3, 256, 384], name='wcnn3')
            bcnn3 = tu.bias(0.0, [384], name='bcnn3')
            conv3 = tf.add(tu.conv2d(pool2, wcnn3, stride=(1, 1), padding='SAME'), bcnn3)
            conv3 = tu.relu(conv3)

        with tf.name_scope('cnn_conv4') as inner_scope:
            wcnn4 = tu.weight([3, 3, 384, 384], name='wcnn4')
            bcnn4 = tu.bias(1.0, [384], name='bcnn4')
            conv4 = tf.add(tu.conv2d(conv3, wcnn4, stride=(1, 1), padding='SAME'), bcnn4)
            conv4 = tu.relu(conv4)

        with tf.name_scope('cnn_conv5') as inner_scope:
            wcnn5 = tu.weight([3, 3, 384, 256], name='wcnn5')
            bcnn5 = tu.bias(1.0, [256], name='bcnn5')
            conv5 = tf.add(tu.conv2d(conv4, wcnn5, stride=(1, 1), padding='SAME'), bcnn5)
            conv5 = tu.relu(conv5)
            pool5 = tu.max_pool2d(conv5, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1], padding='VALID')

        return pool5
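The tu helpers used above are not defined anywhere in this listing. Below is a minimal sketch of what they plausibly wrap, assuming thin TF 1.x wrappers; the initializer choice and stddev value are assumptions, not taken from the source.

import tensorflow as tf

def weight(shape, name):
    # Truncated-normal initialization; the stddev is an assumption.
    return tf.Variable(tf.truncated_normal(shape, stddev=0.01), name=name)

def bias(value, shape, name):
    # Constant bias initialization (0.0 or 1.0 in the listings above).
    return tf.Variable(tf.constant(value, shape=shape), name=name)

def conv2d(x, w, stride, padding):
    # stride is a (h, w) pair in the listings; expand to NHWC strides.
    return tf.nn.conv2d(x, w, strides=[1, stride[0], stride[1], 1], padding=padding)

def relu(x):
    return tf.nn.relu(x)

def lrn(x, depth_radius, bias, alpha, beta):
    return tf.nn.local_response_normalization(
        x, depth_radius=depth_radius, bias=bias, alpha=alpha, beta=beta)

def max_pool2d(x, kernel, stride, padding):
    return tf.nn.max_pool(x, ksize=kernel, strides=stride, padding=padding)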
def cnn(x): """ AlexNet convolutional layers definition Args: x: tensor of shape [batch_size, width, height, channels] Returns: pool5: tensor with all convolutions, pooling and lrn operations applied """ with tf.name_scope('alexnet_cnn') as scope: with tf.name_scope('alexnet_cnn_conv1') as inner_scope: wcnn1 = tu.weight([11, 11, 3, 96], name='wcnn1') bcnn1 = tu.bias(0.0, [96], name='bcnn1') conv1 = tf.add(tu.conv2d(x, wcnn1, stride=(4, 4), padding='SAME'), bcnn1) # conv1 = tu.batch_norm(conv1) conv1 = tu.relu(conv1) norm1 = tu.lrn(conv1, depth_radius=2, bias=1.0, alpha=2e-05, beta=0.75) pool1 = tu.max_pool2d(norm1, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1], padding='VALID') with tf.name_scope('alexnet_cnn_conv2') as inner_scope: wcnn2 = tu.weight([5, 5, 96, 256], name='wcnn2') bcnn2 = tu.bias(1.0, [256], name='bcnn2') conv2 = tf.add(tu.conv2d(pool1, wcnn2, stride=(1, 1), padding='SAME'), bcnn2) # conv2 = tu.batch_norm(conv2) conv2 = tu.relu(conv2) norm2 = tu.lrn(conv2, depth_radius=2, bias=1.0, alpha=2e-05, beta=0.75) pool2 = tu.max_pool2d(norm2, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1], padding='VALID') with tf.name_scope('alexnet_cnn_conv3') as inner_scope: wcnn3 = tu.weight([3, 3, 256, 384], name='wcnn3') bcnn3 = tu.bias(0.0, [384], name='bcnn3') conv3 = tf.add(tu.conv2d(pool2, wcnn3, stride=(1, 1), padding='SAME'), bcnn3) # conv3 = tu.batch_norm(conv3) conv3 = tu.relu(conv3) with tf.name_scope('alexnet_cnn_conv4') as inner_scope: wcnn4 = tu.weight([3, 3, 384, 384], name='wcnn4') bcnn4 = tu.bias(1.0, [384], name='bcnn4') conv4 = tf.add(tu.conv2d(conv3, wcnn4, stride=(1, 1), padding='SAME'), bcnn4) # conv4 = tu.batch_norm(conv4) conv4 = tu.relu(conv4) with tf.name_scope('alexnet_cnn_conv5') as inner_scope: wcnn5 = tu.weight([3, 3, 384, 256], name='wcnn5') bcnn5 = tu.bias(1.0, [256], name='bcnn5') conv5 = tf.add(tu.conv2d(conv4, wcnn5, stride=(1, 1), padding='SAME'), bcnn5) # conv5 = tu.batch_norm(conv5) conv5 = tu.relu(conv5) pool5 = tu.max_pool2d(conv5, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1], padding='VALID') return pool5
def cnn(x): """ AlexNet convolutional layers definition Args: x: tensor of shape [batch_size, width, height, channels] Returns: pool5: tensor with all convolutions, pooling and lrn operations applied """ with tf.name_scope('alexnet_cnn') as scope: with tf.name_scope('alexnet_cnn_conv1') as inner_scope: wcnn1 = tu.weight([11, 11, 3, 96], name='wcnn1') # bcnn1 = tu.bias(0.0, [96], name='bcnn1') # wcnn1_t = fw(wcnn1) # x_t =fa(cabs(x)) conv1 = tu.conv2d(x, wcnn1, stride=(4, 4), padding='SAME') #conv1 = tu.batch_norm(conv1) conv1 = tf.nn.relu(conv1) norm1 = tu.lrn(conv1, depth_radius=2, bias=1.0, alpha=2e-05, beta=0.75) pool1 = tu.max_pool2d(norm1, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1], padding='VALID') with tf.name_scope('alexnet_cnn_conv2') as inner_scope: wcnn2 = tu.weight([5, 5, 96, 256], name='wcnn2') # bcnn2 = tu.bias(1.0, [256], name='bcnn2') pool1_t = fa(cabs(pool1)) wcnn2_t = fw(wcnn2) conv2 = tu.conv2d(pool1_t, wcnn2_t, stride=(1, 1), padding='SAME') #conv2 = tu.batch_norm(conv2) conv2 = tf.nn.relu(conv2) norm2 = tu.lrn(conv2, depth_radius=2, bias=1.0, alpha=2e-05, beta=0.75) pool2 = tu.max_pool2d(norm2, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1], padding='VALID') with tf.name_scope('alexnet_cnn_conv3') as inner_scope: wcnn3 = tu.weight([3, 3, 256, 384], name='wcnn3') # bcnn3 = tu.bias(0.0, [384], name='bcnn3') pool2_t = fa(cabs(pool2)) wcnn3_t = fw(wcnn3) conv3 = tu.conv2d(pool2_t, wcnn3_t, stride=(1, 1), padding='SAME') #conv3 = tu.batch_norm(conv3) conv3 = tf.nn.relu(conv3) with tf.name_scope('alexnet_cnn_conv4') as inner_scope: wcnn4 = tu.weight([3, 3, 384, 384], name='wcnn4') # bcnn4 = tu.bias(1.0, [384], name='bcnn4') conv3_t = fa(cabs(conv3)) wcnn4_t = fw(wcnn4) conv4 = tu.conv2d(conv3_t, wcnn4_t, stride=(1, 1), padding='SAME') #conv4 = tu.batch_norm(conv4) conv4 = tf.nn.relu(conv4) with tf.name_scope('alexnet_cnn_conv5') as inner_scope: wcnn5 = tu.weight([3, 3, 384, 256], name='wcnn5') # bcnn5 = tu.bias(1.0, [256], name='bcnn5') conv4_t = fa(cabs(conv4)) wcnn5_t = fw(wcnn5) conv5 = tu.conv2d(conv4_t, wcnn5_t, stride=(1, 1), padding='SAME') #conv5 = tu.batch_norm(conv5) conv5 = tf.nn.relu(conv5) pool5 = tu.max_pool2d(conv5, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1], padding='VALID') return pool5