def inception_layer(x, conv_1_size, conv_3_reduce_size, conv_3_size,
                    conv_5_reduce_size, conv_5_size, pool_proj_size,
                    name='inception'):
    """ Create an Inception Layer """
    with tf.variable_scope(name) as scope:
        conv_1 = conv_layer(x, filter_height=1, filter_width=1,
                            num_filters=conv_1_size, name='{}_1x1'.format(name))

        conv_3_reduce = conv_layer(x, filter_height=1, filter_width=1,
                                   num_filters=conv_3_reduce_size,
                                   name='{}_3x3_reduce'.format(name))
        conv_3 = conv_layer(conv_3_reduce, filter_height=3, filter_width=3,
                            num_filters=conv_3_size, name='{}_3x3'.format(name))

        conv_5_reduce = conv_layer(x, filter_height=1, filter_width=1,
                                   num_filters=conv_5_reduce_size,
                                   name='{}_5x5_reduce'.format(name))
        conv_5 = conv_layer(conv_5_reduce, filter_height=5, filter_width=5,
                            num_filters=conv_5_size, name='{}_5x5'.format(name))

        pool = max_pool(x, stride=1, padding='SAME', name='{}_pool'.format(name))
        pool_proj = conv_layer(pool, filter_height=1, filter_width=1,
                               num_filters=pool_proj_size,
                               name='{}_pool_proj'.format(name))

        return tf.concat([conv_1, conv_3, conv_5, pool_proj], axis=3,
                         name='{}_concat'.format(name))
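# A hedged usage sketch of inception_layer, assuming a 4-D NHWC input tensor and the
# conv_layer/max_pool helpers referenced above. The placeholder shape and filter counts
# follow the first GoogLeNet inception module (3a) and are illustrative only.
inputs = tf.placeholder(tf.float32, [None, 28, 28, 192], name='inception_input')  # assumed input
mixed_3a = inception_layer(inputs,
                           conv_1_size=64,
                           conv_3_reduce_size=96, conv_3_size=128,
                           conv_5_reduce_size=16, conv_5_size=32,
                           pool_proj_size=32,
                           name='inception_3a')
# Output depth is the sum of the four branches: 64 + 128 + 32 + 32 = 256 channels.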
def reduction_A(x, input_channel, name):  # 35x35 -> 17x17
    with tf.variable_scope(name) as scope:
        # branch 1 - 3x3 max pool, stride 2
        pool_1 = max_pool(x, [1, 3, 3, 1], 2, 'VALID', 'reduction_A_pool_1')
        # branch 2 - 3x3 conv, 384, stride 2
        conv_2 = conv_layer(x, [3, 3, input_channel, 384], [384], 2, 'VALID', 'reduction_A_conv_2')
        # branch 3 - 1x1 -> 3x3 -> 3x3 (stride 2), 256
        conv_3_1 = conv_layer(x, [1, 1, input_channel, 192], [192], 1, 'SAME', 'reduction_A_conv_3_1')
        conv_3_2 = conv_layer(conv_3_1, [3, 3, 192, 224], [224], 1, 'SAME', 'reduction_A_conv_3_2')
        conv_3_3 = conv_layer(conv_3_2, [3, 3, 224, 256], [256], 2, 'VALID', 'reduction_A_conv_3_3')

        return tf.concat([pool_1, conv_2, conv_3_3], axis=3,
                         name='{}_concat'.format(name))  # 17x17x1024
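# reduction_A above (and reduction_B / dcnn below) rely on conv_layer, max_pool, avg_pool
# and fc_layer helpers that are not listed in this section. A minimal sketch of what they
# might look like, assuming bias + ReLU convolutions; the variable initializers and naming
# are assumptions, not the original implementation.
import tensorflow as tf

def conv_layer(x, filter_shape, bias_shape, stride, padding, name):
    # Assumed helper: convolution with explicit filter/bias shapes, bias add and ReLU.
    with tf.variable_scope(name):
        weights = tf.get_variable('weights', shape=filter_shape,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        biases = tf.get_variable('biases', shape=bias_shape,
                                 initializer=tf.constant_initializer(0.0))
        conv = tf.nn.conv2d(x, weights, strides=[1, stride, stride, 1], padding=padding)
        return tf.nn.relu(tf.nn.bias_add(conv, biases))

def max_pool(x, ksize, stride, padding, name):
    # Assumed helper: max pooling with an explicit kernel-size list.
    return tf.nn.max_pool(x, ksize=ksize, strides=[1, stride, stride, 1],
                          padding=padding, name=name)

def avg_pool(x, ksize, stride, padding, name):
    # Assumed helper: average pooling with an explicit kernel-size list.
    return tf.nn.avg_pool(x, ksize=ksize, strides=[1, stride, stride, 1],
                          padding=padding, name=name)

def fc_layer(x, num_in, num_out, name):
    # Assumed helper: fully connected layer returning raw logits (no activation).
    with tf.variable_scope(name):
        weights = tf.get_variable('weights', shape=[num_in, num_out],
                                  initializer=tf.truncated_normal_initializer(stddev=0.1))
        biases = tf.get_variable('biases', shape=[num_out],
                                 initializer=tf.constant_initializer(0.0))
        return tf.nn.xw_plus_b(x, weights, biases)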
def reduction_B(x, input_channel, name):  # 17x17 -> 8x8
    with tf.variable_scope(name) as scope:
        # branch 1 - 3x3 max pool, stride 2
        pool_1 = max_pool(x, [1, 3, 3, 1], 2, 'VALID', 'reduction_B_pool_1')
        # branch 2 - 1x1 -> 3x3 (stride 2), 192
        conv_2_1 = conv_layer(x, [1, 1, input_channel, 192], [192], 1, 'SAME', 'reduction_B_conv_2_1')
        conv_2_2 = conv_layer(conv_2_1, [3, 3, 192, 192], [192], 2, 'VALID', 'reduction_B_conv_2_2')
        # branch 3 - 1x1 -> 1x7 -> 7x1 -> 3x3 (stride 2), 320
        conv_3_1 = conv_layer(x, [1, 1, input_channel, 256], [256], 1, 'SAME', 'reduction_B_conv_3_1')
        conv_3_2 = conv_layer(conv_3_1, [1, 7, 256, 256], [256], 1, 'SAME', 'reduction_B_conv_3_2')
        conv_3_3 = conv_layer(conv_3_2, [7, 1, 256, 320], [320], 1, 'SAME', 'reduction_B_conv_3_3')
        conv_3_4 = conv_layer(conv_3_3, [3, 3, 320, 320], [320], 2, 'VALID', 'reduction_B_conv_3_4')

        return tf.concat([pool_1, conv_2_2, conv_3_4], axis=3,
                         name='{}_concat'.format(name))  # 8x8x1536
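# dcnn below also calls inception_A, inception_B and inception_C modules that are not
# shown in this section. As a hedged sketch of their shape, here is what inception_A
# could look like following the Inception-v4 paper (four branches of 96 channels each,
# concatenated to 384); the exact filter counts are assumptions. inception_B and
# inception_C follow the same pattern at 17x17 and 8x8 resolution with their own filters.
def inception_A(x, input_channel, name):  # 35x35xC -> 35x35x384
    with tf.variable_scope(name) as scope:
        # branch 1 - 3x3 avg pool -> 1x1 conv, 96
        pool_1 = avg_pool(x, [1, 3, 3, 1], 1, 'SAME', '{}_pool_1'.format(name))
        conv_1 = conv_layer(pool_1, [1, 1, input_channel, 96], [96], 1, 'SAME',
                            '{}_conv_1'.format(name))
        # branch 2 - 1x1 conv, 96
        conv_2 = conv_layer(x, [1, 1, input_channel, 96], [96], 1, 'SAME',
                            '{}_conv_2'.format(name))
        # branch 3 - 1x1 -> 3x3, 96
        conv_3_1 = conv_layer(x, [1, 1, input_channel, 64], [64], 1, 'SAME',
                              '{}_conv_3_1'.format(name))
        conv_3_2 = conv_layer(conv_3_1, [3, 3, 64, 96], [96], 1, 'SAME',
                              '{}_conv_3_2'.format(name))
        # branch 4 - 1x1 -> 3x3 -> 3x3, 96
        conv_4_1 = conv_layer(x, [1, 1, input_channel, 64], [64], 1, 'SAME',
                              '{}_conv_4_1'.format(name))
        conv_4_2 = conv_layer(conv_4_1, [3, 3, 64, 96], [96], 1, 'SAME',
                              '{}_conv_4_2'.format(name))
        conv_4_3 = conv_layer(conv_4_2, [3, 3, 96, 96], [96], 1, 'SAME',
                              '{}_conv_4_3'.format(name))
        return tf.concat([conv_1, conv_2, conv_3_2, conv_4_3], axis=3,
                         name='{}_concat'.format(name))  # 35x35x384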
def dcnn(x, keep_prob, img_result):
    # stem: 299x299x3 -> 35x35x384
    conv_1 = conv_layer(x, [3, 3, 3, 32], [32], 2, 'VALID', 'conv_1')
    conv_2 = conv_layer(conv_1, [3, 3, 32, 32], [32], 1, 'VALID', 'conv_2')
    conv_3 = conv_layer(conv_2, [3, 3, 32, 64], [64], 1, 'SAME', 'conv_3')

    pool_4 = max_pool(conv_3, [1, 3, 3, 1], 2, 'VALID', 'pool_4')             # branch 1
    conv_4 = conv_layer(conv_3, [3, 3, 64, 96], [96], 2, 'VALID', 'conv_4')   # branch 2
    concat_5 = tf.concat([pool_4, conv_4], axis=3, name='concat_5')           # 73x73x160

    conv_5_1 = conv_layer(concat_5, [1, 1, 160, 64], [64], 1, 'SAME', 'conv_5_1')  # branch 1
    conv_6_1 = conv_layer(conv_5_1, [3, 3, 64, 96], [96], 1, 'VALID', 'conv_6_1')
    conv_5_2 = conv_layer(concat_5, [1, 1, 160, 64], [64], 1, 'SAME', 'conv_5_2')  # branch 2
    conv_6_2 = conv_layer(conv_5_2, [7, 1, 64, 64], [64], 1, 'SAME', 'conv_6_2')
    conv_7_2 = conv_layer(conv_6_2, [1, 7, 64, 64], [64], 1, 'SAME', 'conv_7_2')
    conv_8_2 = conv_layer(conv_7_2, [3, 3, 64, 96], [96], 1, 'VALID', 'conv_8_2')
    concat_9 = tf.concat([conv_6_1, conv_8_2], axis=3, name='concat_9')       # 71x71x192

    conv_10 = conv_layer(concat_9, [3, 3, 192, 192], [192], 2, 'VALID', 'conv_10')  # branch 1
    pool_10 = max_pool(concat_9, [1, 2, 2, 1], 2, 'VALID', 'pool_10')               # branch 2
    concat_11 = tf.concat([conv_10, pool_10], axis=3, name='concat_11')       # 35x35x384
    # print("stem shape : ", concat_11.shape)

    # 4 x Inception-A
    inception_a_12 = inception_A(concat_11, 384, 'inception_a_12')
    inception_a_13 = inception_A(inception_a_12, 384, 'inception_a_13')
    inception_a_14 = inception_A(inception_a_13, 384, 'inception_a_14')
    inception_a_15 = inception_A(inception_a_14, 384, 'inception_a_15')
    # print("inception A shape : ", inception_a_15.shape)

    reduction_a_16 = reduction_A(inception_a_15, 384, 'reduction_a_16')       # 17x17x1024
    # print("reduction A shape : ", reduction_a_16.shape)

    # 7 x Inception-B
    inception_b_17 = inception_B(reduction_a_16, 1024, 'inception_b_17')
    inception_b_18 = inception_B(inception_b_17, 1024, 'inception_b_18')
    inception_b_19 = inception_B(inception_b_18, 1024, 'inception_b_19')
    inception_b_20 = inception_B(inception_b_19, 1024, 'inception_b_20')
    inception_b_21 = inception_B(inception_b_20, 1024, 'inception_b_21')
    inception_b_22 = inception_B(inception_b_21, 1024, 'inception_b_22')
    inception_b_23 = inception_B(inception_b_22, 1024, 'inception_b_23')
    # print("inception B shape : ", inception_b_23.shape)

    reduction_b_24 = reduction_B(inception_b_23, 1024, 'reduction_b_24')      # 8x8x1536
    # print("reduction B shape : ", reduction_b_24.shape)

    # 3 x Inception-C
    inception_c_25 = inception_C(reduction_b_24, 1536, 'inception_c_25')
    inception_c_26 = inception_C(inception_c_25, 1536, 'inception_c_26')
    inception_c_27 = inception_C(inception_c_26, 1536, 'inception_c_27')
    # print("inception C shape : ", inception_c_27.shape)

    # 8x8 average pooling -> dropout -> fully connected -> softmax
    pool_28 = avg_pool(inception_c_27, [1, 8, 8, 1], 1, 'VALID', 'avg_pool_28')  # 1x1x1536
    dropout = tf.nn.dropout(pool_28, keep_prob)
    flatten = tf.reshape(dropout, [-1, 1 * 1 * 1536])
    logits = fc_layer(flatten, 1536, img_result, 'fc_layer')
    print("logits : ", logits.shape)

    return tf.nn.softmax(logits)
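# A minimal usage sketch for dcnn, assuming 299x299 RGB inputs (the size that produces the
# 73x73 / 35x35 / 17x17 / 8x8 grids noted above) and that the helper layers plus
# inception_A/B/C are all defined. The placeholder names and num_classes value are illustrative.
import numpy as np

num_classes = 10  # assumed number of output classes (img_result)

x = tf.placeholder(tf.float32, [None, 299, 299, 3], name='input')
keep_prob = tf.placeholder(tf.float32, name='keep_prob')
predictions = dcnn(x, keep_prob, num_classes)  # softmax probabilities, shape [batch, num_classes]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(2, 299, 299, 3).astype(np.float32)
    probs = sess.run(predictions, feed_dict={x: batch, keep_prob: 1.0})
    print(probs.shape)  # (2, 10)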