def model_fn(features, labels, mode, params):
    """Estimator model_fn: spectrogram CNN topped by a tower of inception blocks ('incep4').

    Args:
        features: batch of single-channel spectrograms, reshapeable to
            [-1, 99, 161, 1].
        labels: integer class ids; only used in TRAIN/EVAL modes.
        mode: a tf.estimator.ModeKeys value.
        params: dict with keys 'verbose_summary', 'dropout_rate',
            'num_classes', 'learning_rate'.

    Returns:
        tf.estimator.EstimatorSpec for the given mode.
    """
    x = tf.reshape(features, [-1, 99, 161, 1], name='input_incep4')
    # Batch-norm the raw input; its moving-average updates are honored below
    # via the UPDATE_OPS control dependency around the train op.
    x_norm = tf.layers.batch_normalization(x, training=mode == tf.estimator.ModeKeys.TRAIN, name='x_norm')
    if params['verbose_summary']:
        tf.summary.image('input', x)
    conv1 = tf.layers.conv2d(x_norm, filters=16, kernel_size=3, padding='same', activation=tf.nn.relu, name='conv1')
    conv1b = tf.layers.conv2d(conv1, filters=16, kernel_size=3, activation=tf.nn.relu, name='conv1b')
    pool1 = tf.layers.max_pooling2d(conv1b, pool_size=[2, 2], strides=2, name='pool1')
    if params['verbose_summary']:
        log_conv_kernel('conv1')
        log_conv_kernel('conv1b')
        tf.summary.image('pool1', pool1[:, :, :, 0:1])
    # NOTE(review): incep2 consumes conv1, so conv1b/pool1 never feed the
    # network (they only appear in summaries). Sibling model_fns in this file
    # feed pool1 into their first inception block -- confirm this is intentional.
    incep2 = inception_block(conv1, name='incep2')
    # Widening tower of inception blocks (4 -> 8 -> 16 -> 20 filters per branch).
    incep3 = inception_block(incep2, t1x1=4, t3x3=4, t5x5=4, tmp=4, name='incep3')
    incep4 = inception_block(incep3, t1x1=8, t3x3=8, t5x5=8, tmp=8, name='incep4')
    incep5 = inception_block(incep4, t1x1=16, t3x3=16, t5x5=16, tmp=16, name='incep5')
    incep6 = inception_block(incep5, t1x1=20, t3x3=20, t5x5=20, tmp=20, name='incep6')
    flat = flatten(incep6)
    # Dropout is applied to the flat features *before* the dense layer here
    # (other model_fns in this file drop out after the dense) -- kept as written.
    dropout4 = tf.layers.dropout(flat, rate=params['dropout_rate'], training=mode == tf.estimator.ModeKeys.TRAIN, name='dropout4')
    dense4 = tf.layers.dense(dropout4, units=2048, activation=tf.nn.relu, name='dense4')
    logits = tf.layers.dense(dense4, units=params['num_classes'], name='logits')
    predictions = {
        'classes': tf.argmax(logits, axis=1, name='prediction_classes'),
        'probabilities': tf.nn.softmax(logits, name='prediction_softmax')
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        # Serving output: softmax probabilities only.
        return tf.estimator.EstimatorSpec(mode=mode, predictions={'predictions': predictions['probabilities']})
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=params['num_classes'], name='onehot_labels')
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    tf.summary.scalar('loss', loss)
    optimizer = tf.train.GradientDescentOptimizer(learning_rate=params['learning_rate'])
    # Run batch-norm moving-average updates alongside every train step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions['classes'])
    }
    tf.summary.scalar('accuracy', eval_metric_ops['accuracy'][1])
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops
    )
def conv_group(prev, filters, name='conv_group', verbose=False):
    """Three 3x3 conv layers followed by 2x2 max-pooling, under one variable scope.

    The first conv uses 'same' padding; the remaining two use the default
    'valid' padding. With verbose=True, kernel and pool summaries are logged.

    Args:
        prev: input feature-map tensor.
        filters: filter count shared by all three convs.
        name: variable scope (and summary prefix) for the group.
        verbose: when True, emit kernel/pool TensorBoard summaries.

    Returns:
        The pooled output tensor.
    """
    with tf.variable_scope(name):
        net = tf.layers.conv2d(prev, filters=filters, kernel_size=3, padding='same',
                               activation=tf.nn.relu, name='conv_same')
        # Two further valid-padding convs with the same filter count.
        for layer_name in ('convb', 'convc'):
            net = tf.layers.conv2d(net, filters=filters, kernel_size=3,
                                   activation=tf.nn.relu, name=layer_name)
        net = tf.layers.max_pooling2d(net, pool_size=[2, 2], strides=2, name='pool')
        if verbose:
            for layer_name in ('conv_same', 'convb', 'convc'):
                log_conv_kernel('{}/{}'.format(name, layer_name))
            tf.summary.image('{}/pool'.format(name), net[:, :, :, 0:1])
        return net
def model_fn(features, labels, mode, params):
    """Estimator model_fn ('incep100_lg'): alternating conv stacks and inception blocks.

    Args:
        features: batch of two-channel inputs, reshapeable to [-1, 125, 161, 2]
            (channel 0: spectrogram, channel 1: frequency view, per the
            summaries below).
        labels: integer class ids; only used in TRAIN/EVAL modes.
        mode: a tf.estimator.ModeKeys value.
        params: dict with keys 'verbose_summary', 'dropout_rate',
            'num_classes', 'learning_rate'.

    Returns:
        tf.estimator.EstimatorSpec for the given mode.
    """
    is_training = mode == tf.estimator.ModeKeys.TRAIN

    x = tf.reshape(features, [-1, 125, 161, 2], name='input_incep100_lg')
    x_norm = tf.layers.batch_normalization(x, training=is_training, name='x_norm')

    if params['verbose_summary']:
        # Show both channels of the first three samples in the batch.
        with tf.variable_scope('input'):
            for sample in range(3):
                tf.summary.image('input_spec_{}'.format(sample),
                                 tf.reshape(x[sample, :, :, 0], [-1, 125, 161, 1]))
                tf.summary.image('input_freq_{}'.format(sample),
                                 tf.reshape(x[sample, :, :, 1], [-1, 125, 161, 1]))

    def _conv_stack(inputs, filters, idx):
        # conv{idx} (same padding) -> conv{idx}b (valid) -> pool{idx},
        # with optional kernel/pool summaries. Op names match the original
        # hand-unrolled layers.
        a = tf.layers.conv2d(inputs, filters=filters, kernel_size=3, padding='same',
                             activation=tf.nn.relu, name='conv{}'.format(idx))
        b = tf.layers.conv2d(a, filters=filters, kernel_size=3,
                             activation=tf.nn.relu, name='conv{}b'.format(idx))
        pooled = tf.layers.max_pooling2d(b, pool_size=[2, 2], strides=2,
                                         name='pool{}'.format(idx))
        if params['verbose_summary']:
            log_conv_kernel('conv{}'.format(idx))
            log_conv_kernel('conv{}b'.format(idx))
            tf.summary.image('pool{}'.format(idx), pooled[:, :, :, 0:1])
        return pooled

    # Backbone: conv stacks of widening filter counts, interleaved with
    # two inception blocks.
    pool1 = _conv_stack(x_norm, 16, 1)
    incep2 = inception_block(pool1, t1x1=8, t3x3=8, t5x5=8, tmp=8, name='incep2')
    pool3 = _conv_stack(incep2, 32, 3)
    pool5 = _conv_stack(pool3, 64, 5)
    incep6 = inception_block(pool5, t1x1=32, t3x3=32, t5x5=32, tmp=32, name='incep6')
    pool7 = _conv_stack(incep6, 128, 7)
    pool8 = _conv_stack(pool7, 256, 8)

    # Classifier head: dense -> dropout -> logits.
    flat = flatten(pool8)
    dense4 = tf.layers.dense(flat, units=2048, activation=tf.nn.relu, name='dense4')
    dropout4 = tf.layers.dropout(dense4, rate=params['dropout_rate'],
                                 training=is_training, name='dropout4')
    logits = tf.layers.dense(dropout4, units=params['num_classes'], name='logits')

    predictions = {
        'classes': tf.argmax(logits, axis=1, name='prediction_classes'),
        'probabilities': tf.nn.softmax(logits, name='prediction_softmax')
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        # Serving output: softmax probabilities only.
        return tf.estimator.EstimatorSpec(
            mode=mode, predictions={'predictions': predictions['probabilities']})

    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32),
                               depth=params['num_classes'], name='onehot_labels')
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    tf.summary.scalar('loss', loss)

    optimizer = tf.train.GradientDescentOptimizer(learning_rate=params['learning_rate'])
    # Run batch-norm moving-average updates alongside every train step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())

    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions['classes'])
    }
    tf.summary.scalar('accuracy', eval_metric_ops['accuracy'][1])
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops
    )
def branch_incep(x, name, mode, params):
    """Feature-extraction branch: conv stacks interleaved with inception blocks.

    Builds the whole branch inside variable scope `name` and returns the final
    pooled feature map (no classifier head).

    Args:
        x: input feature-map tensor.
        name: variable scope for the branch; also prefixes kernel-log names.
        mode: a tf.estimator.ModeKeys value (currently unused here).
        params: dict; only 'verbose_summary' is read.

    Returns:
        The final pool8 tensor.
    """
    with tf.variable_scope(name):

        def _conv_stack(inputs, filters, idx):
            # conv{idx} (same padding) -> conv{idx}b (valid) -> pool{idx},
            # with optional summaries. Kernel logs are scoped by the branch
            # name; image summaries keep their unscoped 'pool{idx}' titles.
            a = tf.layers.conv2d(inputs, filters=filters, kernel_size=3, padding='same',
                                 activation=tf.nn.relu, name='conv{}'.format(idx))
            b = tf.layers.conv2d(a, filters=filters, kernel_size=3,
                                 activation=tf.nn.relu, name='conv{}b'.format(idx))
            pooled = tf.layers.max_pooling2d(b, pool_size=[2, 2], strides=2,
                                             name='pool{}'.format(idx))
            if params['verbose_summary']:
                log_conv_kernel('{}/conv{}'.format(name, idx))
                log_conv_kernel('{}/conv{}b'.format(name, idx))
                tf.summary.image('pool{}'.format(idx), pooled[:, :, :, 0:1])
            return pooled

        pool1 = _conv_stack(x, 16, 1)
        incep2 = inception_block(pool1, t1x1=8, t3x3=8, t5x5=8, tmp=8, name='incep2')
        pool3 = _conv_stack(incep2, 32, 3)
        pool5 = _conv_stack(pool3, 64, 5)
        incep6 = inception_block(pool5, t1x1=32, t3x3=32, t5x5=32, tmp=32, name='incep6')
        pool7 = _conv_stack(incep6, 128, 7)
        return _conv_stack(pool7, 256, 8)
def model_fn(features, labels, mode, params):
    """Estimator model_fn ('cnn4'): plain 3-layer CNN over the spectrogram channel.

    Takes a two-channel input, batch-normalizes it, then keeps only channel 0
    (the spectrogram) for the conv stack.

    Consistency fix: previously mixed `tf.estimator.ModeKeys` with bare
    `ModeKeys`/`EstimatorSpec`; now uses the fully qualified names like the
    sibling model_fns in this file.

    Args:
        features: batch reshapeable to [-1, 125, 161, 2].
        labels: integer class ids; only used in TRAIN/EVAL modes.
        mode: a tf.estimator.ModeKeys value.
        params: dict with keys 'verbose_summary', 'dropout_rate',
            'num_classes', 'learning_rate'.

    Returns:
        tf.estimator.EstimatorSpec for the given mode.
    """
    is_training = mode == tf.estimator.ModeKeys.TRAIN

    x = tf.reshape(features, [-1, 125, 161, 2], name='cnn4')
    x_norm = tf.layers.batch_normalization(
        x, training=is_training, name='x_norm')
    # Keep only channel 0 (spectrogram) for the rest of the network.
    x = tf.reshape(x_norm[:, :, :, 0], [-1, 125, 161, 1], name='reshape_spec')
    if params['verbose_summary']:
        tf.summary.image('input', x)

    conv1 = tf.layers.conv2d(x, filters=16, kernel_size=5, activation=tf.nn.relu, name='conv1')
    pool1 = tf.layers.max_pooling2d(conv1, pool_size=[2, 2], strides=2, name='pool1')
    if params['verbose_summary']:
        log_conv_kernel('conv1')
        tf.summary.image('pool1', pool1[:, :, :, 0:1])

    conv2 = tf.layers.conv2d(pool1, filters=32, kernel_size=5, activation=tf.nn.relu, name='conv2')
    pool2 = tf.layers.max_pooling2d(conv2, pool_size=[2, 2], strides=2, name='pool2')
    if params['verbose_summary']:
        log_conv_kernel('conv2')
        tf.summary.image('pool2', pool2[:, :, :, 0:1])

    conv3 = tf.layers.conv2d(pool2, filters=64, kernel_size=5, activation=tf.nn.relu, name='conv3')
    pool3 = tf.layers.max_pooling2d(conv3, pool_size=[2, 2], strides=2, name='pool3')
    if params['verbose_summary']:
        log_conv_kernel('conv3')
        tf.summary.image('pool3', pool3[:, :, :, 0:1])

    # Flatten using the statically known spatial dims of pool3.
    dim = pool3.get_shape()[1:]
    dim = int(dim[0] * dim[1] * dim[2])
    flat = tf.reshape(pool3, [-1, dim], name='flat')

    # Classifier head: dropout on the flat features, then dense -> logits.
    dropout4 = tf.layers.dropout(flat, rate=params['dropout_rate'],
                                 training=is_training, name='dropout4')
    dense4 = tf.layers.dense(dropout4, units=128, activation=tf.nn.relu, name='dense4')
    logits = tf.layers.dense(dense4, units=params['num_classes'], name='logits')

    predictions = {
        'classes': tf.argmax(logits, axis=1, name='prediction_classes'),
        'probabilities': tf.nn.softmax(logits, name='prediction_probabilities'),
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        # Serving output: softmax probabilities only.
        return tf.estimator.EstimatorSpec(
            mode=mode, predictions={'predictions': predictions['probabilities']})

    # Confusion-matrix image summary (TRAIN/EVAL only; labels required).
    tf.summary.image(
        'confusion_matrix',
        conf_mat(labels, predictions['classes'], params['num_classes']))

    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32),
                               depth=params['num_classes'], name='onehot_labels')
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    tf.summary.scalar('loss', loss)

    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=params['learning_rate'])
    # Run batch-norm moving-average updates alongside every train step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())

    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions['classes'])
    }
    tf.summary.scalar('accuracy', eval_metric_ops['accuracy'][1])
    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=loss,
                                      train_op=train_op,
                                      eval_metric_ops=eval_metric_ops)
def model_fn(features, labels, mode, params):
    """Estimator model_fn ('flatv2'): deep VGG-style CNN over 125x128 inputs.

    Bug fix: the verbose conv summaries previously showed conv1's activations
    for every layer (copy-paste of `conv1` in all three summary loops); each
    layer's own activations are now shown.
    Generalization: the class count was hard-coded to 12 in `logits` and
    `one_hot`; it now reads params.get('num_classes', 12), which is
    backward-compatible with existing callers.

    Args:
        features: batch of 16000-sample inputs, reshapeable to
            [-1, 125, 128, 1] (also summarized as raw audio).
        labels: integer class ids; only used in TRAIN/EVAL modes.
        mode: a tf.estimator.ModeKeys value.
        params: dict with keys 'verbose_summary', 'dropout_rate',
            'learning_rate', and optionally 'num_classes' (default 12).

    Returns:
        tf.estimator.EstimatorSpec for the given mode.
    """
    num_classes = params.get('num_classes', 12)
    is_training = mode == tf.estimator.ModeKeys.TRAIN

    x = tf.reshape(features, [-1, 125, 128, 1], name='input_flatv2')
    x_flat = tf.reshape(features, [-1, 16000])  # raw waveform view for the audio summary
    x_norm = tf.layers.batch_normalization(
        x, training=is_training, name='x_norm')
    if params['verbose_summary']:
        tf.summary.image('input', x)
        tf.summary.audio('input', x_flat, 16000)

    def _log_group(convs, pool_name, pool):
        # Kernel + activation summaries for one conv group, then the pool.
        # `convs` pairs each layer index with its own tensor, so every
        # summary shows the right layer (previously always conv1).
        for i, conv in convs:
            label = 'conv{}'.format(i)
            graph_utils.log_conv_kernel(label)
            tf.summary.image(label, tf.expand_dims(conv[..., 0], -1))
        tf.summary.image(pool_name, pool[:, :, :, 0:1])

    # Group 1: shrinking kernels (7 -> 5 -> 3), widening filters.
    conv1 = tf.layers.conv2d(x_norm, filters=16, kernel_size=7, activation=tf.nn.relu, name='conv1')
    conv2 = tf.layers.conv2d(conv1, filters=32, kernel_size=5, activation=tf.nn.relu, name='conv2')
    conv3 = tf.layers.conv2d(conv2, filters=64, kernel_size=3, activation=tf.nn.relu, name='conv3')
    pool3 = tf.layers.max_pooling2d(conv3, pool_size=[2, 2], strides=2, name='pool3')
    if params['verbose_summary']:
        _log_group(((1, conv1), (2, conv2), (3, conv3)), 'pool3', pool3)

    # Group 2: 128 -> 256 -> 512 filters.
    conv4 = tf.layers.conv2d(pool3, filters=128, kernel_size=3, activation=tf.nn.relu, name='conv4')
    conv5 = tf.layers.conv2d(conv4, filters=256, kernel_size=3, activation=tf.nn.relu, name='conv5')
    conv6 = tf.layers.conv2d(conv5, filters=512, kernel_size=3, activation=tf.nn.relu, name='conv6')
    pool6 = tf.layers.max_pooling2d(conv6, pool_size=[2, 2], strides=2, name='pool6')
    if params['verbose_summary']:
        _log_group(((4, conv4), (5, conv5), (6, conv6)), 'pool6', pool6)

    # Group 3: three 1024-filter convs.
    conv7 = tf.layers.conv2d(pool6, filters=1024, kernel_size=3, activation=tf.nn.relu, name='conv7')
    conv8 = tf.layers.conv2d(conv7, filters=1024, kernel_size=3, activation=tf.nn.relu, name='conv8')
    conv9 = tf.layers.conv2d(conv8, filters=1024, kernel_size=3, activation=tf.nn.relu, name='conv9')
    pool9 = tf.layers.max_pooling2d(conv9, pool_size=[2, 2], strides=2, name='pool9')
    if params['verbose_summary']:
        _log_group(((7, conv7), (8, conv8), (9, conv9)), 'pool9', pool9)

    # 1x1 conv bottleneck before the classifier head.
    conv10 = tf.layers.conv2d(pool9, filters=512, kernel_size=1, activation=tf.nn.relu, name='conv10')
    conv11 = tf.layers.conv2d(conv10, filters=512, kernel_size=1, activation=tf.nn.relu, name='conv11')
    conv12 = tf.layers.conv2d(conv11, filters=512, kernel_size=1, activation=tf.nn.relu, name='conv12')

    flat = flatten(conv12)
    dense = tf.layers.dense(flat, units=1024, activation=tf.nn.relu, name='dense')
    dropout = tf.layers.dropout(dense, rate=params['dropout_rate'],
                                training=is_training, name='dropout')
    logits = tf.layers.dense(dropout, units=num_classes, name='logits')

    predictions = {
        'classes': tf.argmax(logits, axis=1, name='prediction_classes'),
        'probabilities': tf.nn.softmax(logits, name='prediction_softmax')
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        # Serving output: softmax probabilities only.
        return tf.estimator.EstimatorSpec(
            mode=mode, predictions={'predictions': predictions['probabilities']})

    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32),
                               depth=num_classes, name='onehot_labels')
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    tf.summary.scalar('loss', loss)

    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=params['learning_rate'])
    # Run batch-norm moving-average updates alongside every train step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())

    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions['classes'])
    }
    tf.summary.scalar('accuracy', eval_metric_ops['accuracy'][1])
    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=loss,
                                      train_op=train_op,
                                      eval_metric_ops=eval_metric_ops)
def model_fn(features, labels, mode, params):
    """Estimator model_fn ('incep14'): strided-conv front end + inception_block_v2 tower.

    Downsampling is done with stride-2 convs (conv3, conv6) rather than
    pooling. The commented-out incep7/9/11/12 lines record experiment
    variants that were dropped from the feeding chain.

    Args:
        features: batch of single-channel spectrograms, reshapeable to
            [-1, 99, 161, 1].
        labels: integer class ids; only used in TRAIN/EVAL modes.
        mode: a tf.estimator.ModeKeys value; also forwarded to the
            inception blocks for their internal batch norm.
        params: dict with keys 'verbose_summary', 'dropout_rate',
            'num_classes', 'learning_rate'.

    Returns:
        tf.estimator.EstimatorSpec for the given mode.
    """
    x = tf.reshape(features, [-1, 99, 161, 1], name='input_incep14')
    # Batch-norm the raw input; updates run via UPDATE_OPS below.
    x_norm = tf.layers.batch_normalization(
        x, training=mode == tf.estimator.ModeKeys.TRAIN, name='x_norm')
    if params['verbose_summary']:
        tf.summary.image('input', x)
    # First conv group; conv3 downsamples with stride 2.
    conv1 = tf.layers.conv2d(x_norm, filters=16, kernel_size=3, activation=tf.nn.relu, name='conv1')
    conv2 = tf.layers.conv2d(conv1, filters=32, kernel_size=3, activation=tf.nn.relu, name='conv2')
    conv3 = tf.layers.conv2d(conv2, filters=32, kernel_size=3, strides=(2, 2), activation=tf.nn.relu, name='conv3')
    if params['verbose_summary']:
        log_conv_kernel('conv1')
        log_conv_kernel('conv2')
        log_conv_kernel('conv3')
        tf.summary.image('conv3', conv3[:, :, :, 0:1])
    # Second conv group; conv6 downsamples with stride 2.
    conv4 = tf.layers.conv2d(conv3, filters=64, kernel_size=3, activation=tf.nn.relu, name='conv4')
    conv5 = tf.layers.conv2d(conv4, filters=128, kernel_size=3, activation=tf.nn.relu, name='conv5')
    conv6 = tf.layers.conv2d(conv5, filters=128, kernel_size=3, strides=(2, 2), activation=tf.nn.relu, name='conv6')
    if params['verbose_summary']:
        log_conv_kernel('conv4')
        log_conv_kernel('conv5')
        log_conv_kernel('conv6')
        tf.summary.image('conv6', conv6[:, :, :, 0:1])
    # Inception tower with internal batch norm (norm=True, mode forwarded).
    # Branch widths: 32 -> 48 -> 64 -> 128 -> 256; incep8 feeds from incep6
    # and incep10 from incep8 (the commented blocks are skipped).
    incep4 = inception_block_v2(conv6, t1x1=32, t3x3=32, t5x5=32, tmp=32, name='incep4', norm=True, mode=mode)
    incep5 = inception_block_v2(incep4, t1x1=48, t3x3=48, t5x5=48, tmp=48, name='incep5', norm=True, mode=mode)
    incep6 = inception_block_v2(incep5, t1x1=64, t3x3=64, t5x5=64, tmp=64, name='incep6', norm=True, mode=mode)
    # incep7 = inception_block_v2(incep6, t1x1=96, t3x3=96, t5x5=96, tmp=96, name='incep7', norm=True, mode=mode)
    incep8 = inception_block_v2(incep6, t1x1=128, t3x3=128, t5x5=128, tmp=128, name='incep8', norm=True, mode=mode)
    # incep9 = inception_block_v2(incep8, t1x1=192, t3x3=192, t5x5=192, tmp=192, name='incep9', norm=True, mode=mode)
    incep10 = inception_block_v2(incep8, t1x1=256, t3x3=256, t5x5=256, tmp=256, name='incep10', norm=True, mode=mode)
    # incep11 = inception_block_v2(incep10, t1x1=512, t3x3=512, t5x5=512, tmp=512, name='incep11', norm=True, mode=mode)
    # incep12 = inception_block_v2(incep11, t1x1=768, t3x3=768, t5x5=768, tmp=768, name='incep12', norm=True, mode=mode)
    flat = flatten(incep10)
    # Classifier head: dense -> dropout -> logits.
    dense13 = tf.layers.dense(flat, units=2048, activation=tf.nn.relu, name='dense13')
    dropout13 = tf.layers.dropout(dense13, rate=params['dropout_rate'], training=mode == tf.estimator.ModeKeys.TRAIN, name='dropout13')
    logits = tf.layers.dense(dropout13, units=params['num_classes'], name='logits')
    predictions = {
        'classes': tf.argmax(logits, axis=1, name='prediction_classes'),
        'probabilities': tf.nn.softmax(logits, name='prediction_softmax')
    }
    if mode == tf.estimator.ModeKeys.PREDICT:
        # Serving output: softmax probabilities only.
        return tf.estimator.EstimatorSpec(
            mode=mode, predictions={'predictions': predictions['probabilities']})
    onehot_labels = tf.one_hot(indices=tf.cast(labels, tf.int32), depth=params['num_classes'], name='onehot_labels')
    loss = tf.losses.softmax_cross_entropy(onehot_labels=onehot_labels, logits=logits)
    tf.summary.scalar('loss', loss)
    optimizer = tf.train.GradientDescentOptimizer(
        learning_rate=params['learning_rate'])
    # Run batch-norm moving-average updates alongside every train step.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss=loss, global_step=tf.train.get_global_step())
    eval_metric_ops = {
        'accuracy': tf.metrics.accuracy(labels=labels, predictions=predictions['classes'])
    }
    tf.summary.scalar('accuracy', eval_metric_ops['accuracy'][1])
    return tf.estimator.EstimatorSpec(mode=mode,
                                      loss=loss,
                                      train_op=train_op,
                                      eval_metric_ops=eval_metric_ops)
def branch(x, name, mode, params):
    """Feature branch with a dense head: conv stacks + inception blocks, then dense/dropout.

    Same backbone as branch_incep, but finishes with flatten -> dense(2048)
    -> dropout and returns the regularized dense features.

    Args:
        x: input feature-map tensor.
        name: variable scope for the branch; also prefixes kernel-log names.
        mode: a tf.estimator.ModeKeys value (drives dropout's training flag).
        params: dict; reads 'verbose_summary' and 'dropout_rate'.

    Returns:
        The dropout-regularized dense feature tensor.
    """
    with tf.variable_scope(name):

        def _conv_stack(inputs, filters, idx):
            # conv{idx} (same padding) -> conv{idx}b (valid) -> pool{idx},
            # with optional summaries. Kernel logs are scoped by the branch
            # name; image summaries keep their unscoped 'pool{idx}' titles.
            a = tf.layers.conv2d(inputs, filters=filters, kernel_size=3, padding='same',
                                 activation=tf.nn.relu, name='conv{}'.format(idx))
            b = tf.layers.conv2d(a, filters=filters, kernel_size=3,
                                 activation=tf.nn.relu, name='conv{}b'.format(idx))
            pooled = tf.layers.max_pooling2d(b, pool_size=[2, 2], strides=2,
                                             name='pool{}'.format(idx))
            if params['verbose_summary']:
                log_conv_kernel('{}/conv{}'.format(name, idx))
                log_conv_kernel('{}/conv{}b'.format(name, idx))
                tf.summary.image('pool{}'.format(idx), pooled[:, :, :, 0:1])
            return pooled

        pool1 = _conv_stack(x, 16, 1)
        incep2 = inception_block(pool1, t1x1=8, t3x3=8, t5x5=8, tmp=8, name='incep2')
        pool3 = _conv_stack(incep2, 32, 3)
        pool5 = _conv_stack(pool3, 64, 5)
        incep6 = inception_block(pool5, t1x1=32, t3x3=32, t5x5=32, tmp=32, name='incep6')
        pool7 = _conv_stack(incep6, 128, 7)
        pool8 = _conv_stack(pool7, 256, 8)

        # Dense head with dropout active only during training.
        flat = flatten(pool8)
        dense = tf.layers.dense(flat, units=2048, activation=tf.nn.relu, name='dense')
        dropout = tf.layers.dropout(
            dense, rate=params['dropout_rate'],
            training=mode == tf.estimator.ModeKeys.TRAIN, name='dropout')
        return dropout