def cnn(x): """ CNN model to detect lung cancer Args: x: tensor of shape [batch_size, width, height, channels] Returns: pool2: tensor with all convolutions, pooling applied """ with tf.name_scope('cnn') as scope: with tf.name_scope('conv1') as inner_scope: wcnn1 = tu.weight([3, 3, 1, 64], name='wcnn1') bcnn1 = tu.bias(1.0, [64], name='bcnn1') conv1 = tf.add(tu.conv2d(x, wcnn1, stride=(1, 1), padding='SAME'), bcnn1) conv1 = tu.relu(conv1) # (?, 192, 192, 64) with tf.name_scope('conv2') as inner_scope: wcnn2 = tu.weight([3, 3, 64, 64], name='wcnn2') bcnn2 = tu.bias(1.0, [64], name='bcnn2') conv2 = tf.add(tu.conv2d(conv1, wcnn2, stride=(1, 1), padding='SAME'), bcnn2) conv2 = tu.relu(conv2) #(?, 192, 192, 64) with tf.name_scope('max_pool') as inner_scope: pool1 = tu.max_pool2d(conv2, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], padding='SAME') # (?, 96, 96, 64) with tf.name_scope('conv3') as inner_scope: wcnn3 = tu.weight([3, 3, 64, 64], name='wcnn3') bcnn3 = tu.bias(1.0, [64], name='bcnn3') conv3 = tf.add(tu.conv2d(pool1, wcnn3, stride=(1, 1), padding='SAME'), bcnn3) conv3 = tu.relu(conv3) # (?, 96, 96, 64) with tf.name_scope('conv4') as inner_scope: wcnn4 = tu.weight([3, 3, 64, 64], name='wcnn4') bcnn4 = tu.bias(1.0, [64], name='bcnn4') conv4 = tf.add(tu.conv2d(conv3, wcnn4, stride=(1, 1), padding='SAME'), bcnn4) conv4 = tu.relu(conv4) # (?, 96, 96, 64) with tf.name_scope('conv5') as inner_scope: wcnn5 = tu.weight([3, 3, 64, 64], name='wcnn5') bcnn5 = tu.bias(1.0, [64], name='bcnn5') conv5 = tf.add(tu.conv2d(conv4, wcnn5, stride=(1, 1), padding='SAME'), bcnn5) conv5 = tu.relu(conv5) # (?, 96, 96, 64) with tf.name_scope('max_pool') as inner_scope: pool2 = tu.max_pool2d(conv5, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], padding='SAME') # (?, 48, 48, 64) return pool2
def classifier(x):
    # parameters for convolution kernels
    IMG_DEPTH = 1
    C1_KERNEL_SIZE, C2_KERNEL_SIZE, C3_KERNEL_SIZE = 5, 5, 5
    C1_OUT_CHANNELS, C2_OUT_CHANNELS, C3_OUT_CHANNELS = 6, 16, 120
    C1_STRIDES, C2_STRIDES, C3_STRIDES = 1, 1, 1
    P1_SIZE, P2_SIZE = 2, 2
    P1_STRIDE, P2_STRIDE = 2, 2
    F4_SIZE, F5_SIZE = 84, 10

    C1_kernel = util.weights(
        [C1_KERNEL_SIZE, C1_KERNEL_SIZE, IMG_DEPTH, C1_OUT_CHANNELS], 0.1, 'C1_kernel')
    C2_kernel = util.weights(
        [C2_KERNEL_SIZE, C2_KERNEL_SIZE, C1_OUT_CHANNELS, C2_OUT_CHANNELS], 0.1, 'C2_kernel')
    C3_kernel = util.weights(
        [C3_KERNEL_SIZE, C3_KERNEL_SIZE, C2_OUT_CHANNELS, C3_OUT_CHANNELS], 0.1, 'C3_kernel')
    C1_bias = util.bias([C1_OUT_CHANNELS], 'C1_bias')
    C2_bias = util.bias([C2_OUT_CHANNELS], 'C2_bias')
    C3_bias = util.bias([C3_OUT_CHANNELS], 'C3_bias')

    # LeNet-5 structure
    C1 = util.convLayer(x, C1_kernel, C1_STRIDES, 'SAME')
    ReLU1 = tf.nn.relu(C1 + C1_bias)
    P1 = util.max_pool(ReLU1, P1_SIZE, P1_STRIDE)
    C2 = util.convLayer(P1, C2_kernel, C2_STRIDES, 'SAME')
    ReLU2 = tf.nn.relu(C2 + C2_bias)
    P2 = util.max_pool(ReLU2, P2_SIZE, P2_STRIDE)
    C3 = util.convLayer(P2, C3_kernel, C3_STRIDES, 'SAME')
    ReLU3 = tf.nn.relu(C3 + C3_bias)

    num_F4_in = int(ReLU3.shape[1] * ReLU3.shape[2] * ReLU3.shape[3])
    F4_in = tf.reshape(ReLU3, [-1, num_F4_in])
    F4_weights = util.weights([num_F4_in, F4_SIZE], 0.1, 'F4_weights')
    F4_bias = util.bias([F4_SIZE], 'F4_bias')
    F4 = tf.matmul(F4_in, F4_weights)
    ReLU4 = tf.nn.relu(F4 + F4_bias)

    F5_weights = util.weights([F4_SIZE, F5_SIZE], 0.1, 'F5_weights')
    F5_bias = util.bias([F5_SIZE], 'F5_bias')
    F5 = tf.matmul(ReLU4, F5_weights) + F5_bias
    return F5
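# Hedged sketch of training the F5 logits returned above, assuming TF >= 1.5 and
# 28x28 single-channel inputs with 10 classes (consistent with IMG_DEPTH=1 and
# F5_SIZE=10); the placeholder names and learning rate are assumptions, not the repo's.
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 28, 28, 1])
y_ = tf.placeholder(tf.float32, [None, 10])  # one-hot labels
logits = classifier(x)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=logits))
train_op = tf.train.AdamOptimizer(1e-4).minimize(loss)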
def classifier(x):
    # Initialize parameters in nn
    F1_weights = util.weights([784, 784], 0.01, 'F1_weights')
    F1_bias = util.bias([784], 'F1_bias')
    F2_weights = util.weights([784, 10], 0.01, 'F2_weights')
    F2_bias = util.bias([10], 'F2_bias')

    F1 = tf.matmul(x, F1_weights) + F1_bias
    ReLU1 = tf.nn.relu(F1)
    F2 = tf.matmul(ReLU1, F2_weights) + F2_bias
    y = tf.nn.softmax(F2)
    return y
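# Because this variant returns post-softmax probabilities rather than logits, the
# cross-entropy must be formed with tf.log directly, which is less numerically
# stable than the fused logits-based loss; a small epsilon guards against log(0).
# A minimal sketch with assumed placeholder names:
import tensorflow as tf

x = tf.placeholder(tf.float32, [None, 784])
y_ = tf.placeholder(tf.float32, [None, 10])  # one-hot labels
y = classifier(x)
cross_entropy = -tf.reduce_mean(tf.reduce_sum(y_ * tf.log(y + 1e-10), axis=1))
train_op = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)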
def classifier(x, dropout): """ AlexNet fully connected layers definition Args: x: tensor of shape [batch_size, width, height, channels] dropout: probability of non dropping out units Returns: fc3: 1000 linear tensor taken just before applying the softmax operation it is needed to feed it to tf.softmax_cross_entropy_with_logits() softmax: 1000 linear tensor representing the output probabilities of the image to classify """ pool5 = alexnet(x) dim = pool5.get_shape().as_list() flat_dim = dim[1] * dim[2] * dim[3] # 6 * 6 * 256 flat = tf.reshape(pool5, [-1, flat_dim]) with tf.name_scope('classifier') as scope: with tf.name_scope('fullyconected1') as inner_scope: wfc1 = tu.weight([flat_dim, 4096], name='wfc1') bfc1 = tu.bias(0.0, [4096], name='bfc1') fc1 = tf.add(tf.matmul(flat, wfc1), bfc1) #fc1 = tu.batch_norm(fc1) fc1 = tu.relu(fc1) fc1 = tf.nn.dropout(fc1, dropout) with tf.name_scope('fullyconected2') as inner_scope: wfc2 = tu.weight([4096, 4096], name='wfc2') bfc2 = tu.bias(0.0, [4096], name='bfc2') fc2 = tf.add(tf.matmul(fc1, wfc2), bfc2) #fc2 = tu.batch_norm(fc2) fc2 = tu.relu(fc2) fc2 = tf.nn.dropout(fc2, dropout) with tf.name_scope('classifier_output') as inner_scope: wfc3 = tu.weight([4096, 1000], name='wfc3') bfc3 = tu.bias(0.0, [1000], name='bfc3') fc3 = tf.add(tf.matmul(fc2, wfc3), bfc3) softmax = tf.nn.softmax(fc3) return fc3, softmax
def classifier(x, dropout): """cnn fully connected layers definition Args: x: tensor of shape [batch_size, width, height, channels] dropout: probability of non dropping out units Returns: fc3: 2 linear tensor taken just before applying the softmax operation it is needed to feed it to tf.softmax_cross_entropy_with_logits() softmax: 2 linear tensor representing the output probabilities of the image to classify """ pool2 = cnn(x) dim = pool2.get_shape().as_list() flat_dim = dim[1] * dim[2] * dim[3] # 48 * 48 * 64 flat = tf.reshape(pool2, [-1, flat_dim]) with tf.name_scope('classifier') as scope: with tf.name_scope('fullyconected1') as inner_scope: wfc1 = tu.weight([flat_dim, 500], name='wfc1') bfc1 = tu.bias(1.0, [500], name='bfc1') fc1 = tf.add(tf.matmul(flat, wfc1), bfc1) fc1 = tu.relu(fc1) fc1 = tf.nn.dropout(fc1, dropout) with tf.name_scope('fullyconected2') as inner_scope: wfc2 = tu.weight([500, 100], name='wfc2') bfc2 = tu.bias(1.0, [100], name='bfc2') fc2 = tf.add(tf.matmul(fc1, wfc2), bfc2) fc2 = tu.relu(fc2) fc2 = tf.nn.dropout(fc2, dropout) with tf.name_scope('classifier_output') as inner_scope: wfc3 = tu.weight([100, 2], name='wfc3') bfc3 = tu.bias(1.0, [2], name='bfc3') fc3 = tf.add(tf.matmul(fc2, wfc3), bfc3) softmax = tf.nn.softmax(fc3) return fc3, softmax
def cnn(x): """ CNN model to detect lung cancer Args: x: tensor of shape [batch_size, width, height, channels] Returns: pool2: tensor with all convolutions, pooling applied """ with tf.name_scope('cnn') as scope: with tf.name_scope('conv1') as inner_scope: wcnn1 = tu.weight([3, 3, 1, 64], name='wcnn1') bcnn1 = tu.bias(1.0, [64], name='bcnn1') conv1 = tf.add(tu.conv2d(x, wcnn1, stride=(1, 1), padding='SAME'), bcnn1) conv1 = tu.relu(conv1) # (?, 192, 192, 64) with tf.name_scope('conv2') as inner_scope: wcnn2 = tu.weight([3, 3, 64, 64], name='wcnn2') bcnn2 = tu.bias(1.0, [64], name='bcnn2') conv2 = tf.add( tu.conv2d(conv1, wcnn2, stride=(1, 1), padding='SAME'), bcnn2) conv2 = tu.relu(conv2) #(?, 192, 192, 64) with tf.name_scope('max_pool') as inner_scope: pool1 = tu.max_pool2d(conv2, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], padding='SAME') # (?, 96, 96, 64) with tf.name_scope('conv3') as inner_scope: wcnn3 = tu.weight([3, 3, 64, 64], name='wcnn3') bcnn3 = tu.bias(1.0, [64], name='bcnn3') conv3 = tf.add( tu.conv2d(pool1, wcnn3, stride=(1, 1), padding='SAME'), bcnn3) conv3 = tu.relu(conv3) # (?, 96, 96, 64) with tf.name_scope('conv4') as inner_scope: wcnn4 = tu.weight([3, 3, 64, 64], name='wcnn4') bcnn4 = tu.bias(1.0, [64], name='bcnn4') conv4 = tf.add( tu.conv2d(conv3, wcnn4, stride=(1, 1), padding='SAME'), bcnn4) conv4 = tu.relu(conv4) # (?, 96, 96, 64) with tf.name_scope('conv5') as inner_scope: wcnn5 = tu.weight([3, 3, 64, 64], name='wcnn5') bcnn5 = tu.bias(1.0, [64], name='bcnn5') conv5 = tf.add( tu.conv2d(conv4, wcnn5, stride=(1, 1), padding='SAME'), bcnn5) conv5 = tu.relu(conv5) # (?, 96, 96, 64) with tf.name_scope('max_pool') as inner_scope: pool2 = tu.max_pool2d(conv5, kernel=[1, 2, 2, 1], stride=[1, 2, 2, 1], padding='SAME') # (?, 48, 48, 64) return pool2
def classifier(x):
    # parameters for convolution kernels
    IMG_DEPTH = 1
    C1_KERNEL_SIZE, C2_KERNEL_SIZE, C3_KERNEL_SIZE, C4_KERNEL_SIZE, C5_KERNEL_SIZE = 11, 5, 3, 3, 3
    C1_OUT_CHANNELS, C2_OUT_CHANNELS, C3_OUT_CHANNELS, C4_OUT_CHANNELS, C5_OUT_CHANNELS = 96, 256, 384, 384, 256
    #C1_OUT_CHANNELS, C2_OUT_CHANNELS, C3_OUT_CHANNELS, C4_OUT_CHANNELS, C5_OUT_CHANNELS = 48, 128, 197, 197, 128
    C1_STRIDES, C2_STRIDES, C3_STRIDES, C4_STRIDES, C5_STRIDES = 1, 1, 1, 1, 1
    P1_SIZE, P2_SIZE, P5_SIZE = 3, 3, 3
    P1_STRIDE, P2_STRIDE, P5_STRIDE = 2, 2, 2
    F6_SIZE, F7_SIZE = 4096, 4096
    F8_SIZE = 10

    # convolution kernels and biases
    C1_kernel = util.weights(
        [C1_KERNEL_SIZE, C1_KERNEL_SIZE, IMG_DEPTH, C1_OUT_CHANNELS], 0.01, 'C1_kernel')
    C2_kernel = util.weights(
        [C2_KERNEL_SIZE, C2_KERNEL_SIZE, C1_OUT_CHANNELS, C2_OUT_CHANNELS], 0.01, 'C2_kernel')
    C3_kernel = util.weights(
        [C3_KERNEL_SIZE, C3_KERNEL_SIZE, C2_OUT_CHANNELS, C3_OUT_CHANNELS], 0.01, 'C3_kernel')
    C4_kernel = util.weights(
        [C4_KERNEL_SIZE, C4_KERNEL_SIZE, C3_OUT_CHANNELS, C4_OUT_CHANNELS], 0.01, 'C4_kernel')
    C5_kernel = util.weights(
        [C5_KERNEL_SIZE, C5_KERNEL_SIZE, C4_OUT_CHANNELS, C5_OUT_CHANNELS], 0.01, 'C5_kernel')
    C1_bias = util.bias([C1_OUT_CHANNELS], 'C1_bias')
    C2_bias = util.bias([C2_OUT_CHANNELS], 'C2_bias')
    C3_bias = util.bias([C3_OUT_CHANNELS], 'C3_bias')
    C4_bias = util.bias([C4_OUT_CHANNELS], 'C4_bias')
    C5_bias = util.bias([C5_OUT_CHANNELS], 'C5_bias')

    # AlexNet network
    # Conv layer 1
    C1 = util.convLayer(x, C1_kernel, C1_STRIDES, 'SAME')
    ReLU1 = tf.nn.relu(C1 + C1_bias)
    P1 = util.max_pool(ReLU1, P1_SIZE, P1_STRIDE)
    NORM1 = tf.nn.local_response_normalization(P1)
    # Conv layer 2
    C2 = util.convLayer(NORM1, C2_kernel, C2_STRIDES, 'SAME')
    ReLU2 = tf.nn.relu(C2 + C2_bias)
    P2 = util.max_pool(ReLU2, P2_SIZE, P2_STRIDE)
    NORM2 = tf.nn.local_response_normalization(P2)
    # Conv layer 3
    C3 = util.convLayer(NORM2, C3_kernel, C3_STRIDES, 'SAME')
    ReLU3 = tf.nn.relu(C3 + C3_bias)
    # Conv layer 4
    C4 = util.convLayer(ReLU3, C4_kernel, C4_STRIDES, 'SAME')
    ReLU4 = tf.nn.relu(C4 + C4_bias)
    # Conv layer 5
    C5 = util.convLayer(ReLU4, C5_kernel, C5_STRIDES, 'SAME')
    ReLU5 = tf.nn.relu(C5 + C5_bias)
    P5_pre = util.max_pool(ReLU5, P5_SIZE, P5_STRIDE)
    num_P5_out = int(P5_pre.shape[1] * P5_pre.shape[2] * P5_pre.shape[3])
    P5 = tf.reshape(P5_pre, [-1, num_P5_out])

    # Fully connected layer 6
    F6_weights = util.weights([num_P5_out, F6_SIZE], 0.01, 'F6_weights')
    F6_bias = util.bias([F6_SIZE], 'F6_bias')
    F6 = tf.matmul(P5, F6_weights)
    ReLU6 = tf.nn.relu(F6 + F6_bias)
    DROP6 = tf.nn.dropout(ReLU6, 0.5)
    # Fully connected layer 7
    F7_weights = util.weights([F6_SIZE, F7_SIZE], 0.01, 'F7_weights')
    F7_bias = util.bias([F7_SIZE], 'F7_bias')
    F7 = tf.matmul(DROP6, F7_weights)
    ReLU7 = tf.nn.relu(F7 + F7_bias)
    DROP7 = tf.nn.dropout(ReLU7, 0.5)
    # Fully connected layer 8
    F8_weights = util.weights([F7_SIZE, F8_SIZE], 0.01, 'F8_weights')
    F8_bias = util.bias([F8_SIZE], 'F8_bias')
    logits = tf.matmul(DROP7, F8_weights) + F8_bias
    return logits
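# The util helper module is not shown in this file. Below is a minimal sketch
# consistent with the call sites above (util.weights, util.bias, util.convLayer,
# util.max_pool); the signatures are inferred, so treat this as an assumption
# rather than the repo's actual implementation.
import tensorflow as tf

def weights(shape, stddev, name):
    # truncated-normal initialisation, stddev as passed at each call site
    return tf.Variable(tf.truncated_normal(shape, stddev=stddev), name=name)

def bias(shape, name):
    return tf.Variable(tf.zeros(shape), name=name)

def convLayer(x, kernel, strides, padding):
    return tf.nn.conv2d(x, kernel, strides=[1, strides, strides, 1], padding=padding)

def max_pool(x, size, stride):
    return tf.nn.max_pool(x, ksize=[1, size, size, 1],
                          strides=[1, stride, stride, 1], padding='SAME')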
def alexnet(x): """ AlexNet conv layers definition Args: x: tensor of shape[batch_size,width,height,channels] Returns: pool5: tensor with all convolutions ,pooling and lrn operations applied """ with tf.name_scope('alexnetwork') as scope: with tf.name_scope('conv1') as inner_scope: wcnn1 = tu.weight([11, 11, 3, 96], name='wcnn1') bcnn1 = tu.bias(0.0, [96], name='bcnn1') conv1 = tf.add(tu.conv2d(x, wcnn1, stride=(4, 4), padding='SAME'), bcnn1) #conv1 = tu.batch_norm(conv1) conv1 = tu.relu(conv1) norm1 = tu.lrn(conv1, depth_radius=5, bias=1.0, alpha=1e-04, beta=0.75) pool1 = tu.max_pool2d(norm1, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1], padding='VALID') with tf.name_scope('conv2') as inner_scope: wcnn2 = tu.weights([5, 5, 96, 256], name='wcnn2') bcnn2 = tu.bias(1.0, [256], name='bcnn2') conv2 = tf.add( tu.conv2d(pool1, wcnn2, stride=(1, 1), padding='SAME'), bcnn2) #conv2 = tu.batch_norm(conv2) conv2 = tu.relu(conv2) norm2 = tu.lrn(conv2, depth_radius=5, bias=1.0, alpha=1e-04, beta=0.75) pool2 = tu.max_pool2d(norm2, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1], padding='VALID') with tf.name_scope('conv3') as inner_scope: wcnn3 = tu.weights([3, 3, 256, 384], name='wcnn3') bcnn3 = tu.bias(0.0, [384], name='bcnn3') conv3 = tf.add( tu.conv2d(pool2, wcnn3, stride=(1, 1), padding='SAME'), bcnn3) #conv3 = tu.batch_norm(conv3) conv3 = tu.relu(conv3) with tf.name_scope('conv4') as inner_scope: wcnn4 = tu.weight([3, 3, 384, 384], name='wcnn4') bcnn4 = tu.bias(1.0, [384], name='bcnn4') conv4 = tf.add( tu.conv2d(conv3, wcnn5, stride=(1, 1), padding='SAME'), bcnn5) #conv5 = tu.batch_norm(conv5) conv5 = tu.relu(conv5) pool5 = tu.max_pool2d(conv5, kernel=[1, 3, 3, 1], stride=[1, 2, 2, 1], padding='VALID') return pool5