Code example #1
def reductionA(name, x):
    with tf.variable_scope(name) as scope:
        layer_ = max_pool('max_pool_0', x, k=3, s=2, padding='VALID')

        layer__ = convolution2d('cnn__0', x, 192, k=3, s=2, padding='VALID')

        layer___ = convolution2d('cnn___0', x, 224, k=1, s=1, padding='SAME')
        layer___ = convolution2d('cnn___1',
                                 layer___,
                                 256,
                                 k=3,
                                 s=1,
                                 padding='SAME')
        layer___ = convolution2d('cnn___2',
                                 layer___,
                                 384,
                                 k=3,
                                 s=2,
                                 padding='VALID')

        layer_join = tf.concat([layer_, layer__, layer___],
                               axis=3,
                               name='join')
        print('layer_name :', 'join')
        print('layer_shape :', layer_join.get_shape())
    return layer_join
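
Every example on this page leans on helper functions such as convolution2d and max_pool that live elsewhere in the source projects and are not shown here. Below is a minimal sketch of what they might look like, assuming plain TF1 tf.nn ops and the (name, x, out_ch, k, s, padding) calling convention used above; the real helpers may differ (code example #5, for instance, uses a variant that also returns a summary tensor).

import tensorflow as tf

def convolution2d(name, x, out_ch, k=3, s=1, padding='SAME'):
    # Hypothetical helper: conv + bias + ReLU under its own variable scope.
    with tf.variable_scope(name):
        in_ch = x.get_shape().as_list()[-1]
        w = tf.get_variable('w', [k, k, in_ch, out_ch],
                            initializer=tf.variance_scaling_initializer())
        b = tf.get_variable('b', [out_ch], initializer=tf.zeros_initializer())
        layer = tf.nn.relu(tf.nn.conv2d(x, w, strides=[1, s, s, 1],
                                        padding=padding) + b)
        print('layer_name :', name)
        print('layer_shape :', layer.get_shape())
    return layer

def max_pool(name, x, k=2, s=2, padding='SAME'):
    # Hypothetical helper: plain max pooling with the same argument style.
    layer = tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, s, s, 1],
                           padding=padding, name=name)
    print('layer_name :', name)
    print('layer_shape :', layer.get_shape())
    return layer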
Code example #2
def stem_2(name, x):
    with tf.variable_scope(name) as scope:
        layer = convolution2d('cnn_0', x, 192, k=3, s=2, padding='VALID')
        layer_ = max_pool('max__0', x, k=3, s=2, padding='VALID')
        layer_join = tf.concat([layer, layer_], axis=3, name='join')
        print('layer_name :', 'join')
        print('layer_shape :', layer_join.get_shape())
    return layer_join
Code example #3
def stem(name, x):
    with tf.variable_scope(name) as scope:
        layer = convolution2d('cnn_0', x, 32, k=3, s=2, padding='VALID')
        layer = convolution2d('cnn_1', layer, 32, k=3, s=1, padding='VALID')
        layer = convolution2d('cnn_2', layer, 64, k=3, s=1, padding='SAME')
        layer_1 = max_pool('max_3', layer, k=3, s=2, padding='VALID')
        layer_2 = convolution2d('cnn_3_1', layer, 96, k=3, s=2, padding='VALID')
        layer_join = tf.concat([layer_1, layer_2], axis=3, name='join')
        print('layer_name :', 'join')
        print('layer_shape :', layer_join.get_shape())
    return layer_join
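
For 'VALID' padding TensorFlow computes each output side as ceil((in - k + 1) / s), and for 'SAME' as ceil(in / s). As a quick sanity check, here is the downsampling this stem applies to a 299x299 Inception-v4-style input (pure arithmetic, no session needed; the 299 input size is an assumption):

def out_size(in_size, k, s, padding):
    # Integer ceiling, matching TF's output-size rule for conv/pool ops.
    if padding == 'VALID':
        return (in_size - k + s) // s
    return (in_size + s - 1) // s

size = 299
size = out_size(size, 3, 2, 'VALID')  # cnn_0 -> 149
size = out_size(size, 3, 1, 'VALID')  # cnn_1 -> 147
size = out_size(size, 3, 1, 'SAME')   # cnn_2 -> 147
size = out_size(size, 3, 2, 'VALID')  # max_3 / cnn_3_1 -> 73
print(size, 'x', size, 'x', 64 + 96)  # 73 x 73 x 160 after the concat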
Code example #4
def reductionC(name, x):
    with tf.variable_scope(name) as scope:
        layer = max_pool('max_pool0', x, k=3, s=2, padding='VALID')

        layer_ = convolution2d('cnn_0', x, 256, k=1, s=1)
        layer_ = convolution2d('cnn_1', layer_, 384, k=3, s=1, padding='VALID')

        # Each branch needs its own scope names; reusing 'cnn_0'/'cnn_1'
        # would collide with the branch above under tf.variable_scope.
        layer__ = convolution2d('cnn__0', x, 256, k=1, s=1)
        layer__ = convolution2d('cnn__1', layer__, 256, k=3, s=1, padding='VALID')

        layer___ = convolution2d('cnn___0', x, 256, k=1, s=1)
        layer___ = convolution2d('cnn___1', layer___, 256, k=3, s=1)
        layer___ = convolution2d('cnn___2', layer___, 256, k=3, s=1, padding='VALID')

        layer_join = tf.concat([layer, layer_, layer__, layer___],
                               axis=3, name='join')
    return layer_join
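
Taken together, the four modules above would chain in the usual Inception-v4 order: the stem first, then the reduction blocks between groups of Inception blocks. A hypothetical wiring for a 299x299 RGB input; the Inception-A/B/C blocks that the original projects presumably insert between these calls are omitted:

x = tf.placeholder(tf.float32, [None, 299, 299, 3], name='x')

net = stem('stem', x)                  # stem downsampling and concat
net = stem_2('stem_2', net)            # second stem-style downsampling
net = reductionA('reduction_a', net)   # Reduction-A between block groups
net = reductionC('reduction_c', net)   # final reduction before pooling/logits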
Code example #5
##########################setting############################

(image_height, image_width, image_color_ch, n_classes,
 train_imgs, train_labs, test_imgs, test_labs) = data.mnist_28x28()
x_ = tf.placeholder(dtype=tf.float32,
                    shape=[None, image_height, image_width, image_color_ch],
                    name='x_')
y_ = tf.placeholder(dtype=tf.int32, shape=[None, n_classes], name='y_')
phase_train = tf.placeholder(dtype=tf.bool, name='phase_train')
batch_size = 60
##########################structure##########################

layer, conv1_summary_tensor = convolution2d('conv1', x_, 64)
#layer = batch_norm_1(layer, phase_train, 'bn0')
layer = batch_norm_2(layer, phase_train, 'bn0')  # best learning rate ==> 0.1
layer = max_pool('max_pool1', layer)
layer, topconv_summary_tensor = convolution2d('top_conv', layer, 128)
layer = batch_norm_2(layer, phase_train,
                     'bn1')  # best learning rate ==> 0.01 when more batch norm layers are used
layer, fc_summary_tensor = affine('fully_connect',
                                  layer,
                                  1024,
                                  keep_prob=0.5,
                                  phase_train=phase_train)
layer = batch_norm_1(layer, phase_train, 'bn2')
y_conv = logits('end_layer', layer, n_classes)

merged = tf.summary.merge_all()

#############################################################
#cam = get_class_map('gap', top_conv, 0, im_width=image_width)
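
The placeholders defined in the setting block are fed at run time. A minimal sketch of evaluating the merged summaries for one mini-batch, assuming the graph above and that convolution2d/affine registered at least one tf.summary op (tf.summary.merge_all returns None otherwise):

sess = tf.Session()
sess.run(tf.global_variables_initializer())
writer = tf.summary.FileWriter('./logs', sess.graph)

# First mini-batch from the MNIST arrays loaded above.
batch_xs = train_imgs[:batch_size]
batch_ys = train_labs[:batch_size]
summary_str = sess.run(merged, feed_dict={x_: batch_xs,
                                          y_: batch_ys,
                                          phase_train: True})
writer.add_summary(summary_str, 0)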
Code example #6
File: RAM.py  Project: SoulDuck/RAM-tf
# for the MNIST version
pool_indices = [1, 4, 7, 10]  # indices past the last layer never match below
out_chs = [32, 32, 64, 64, 64]
filters = [5, 3, 5, 3, 3]
strides = [2, 1, 2, 1, 1]

# Building the network
layer = x_
assert len(out_chs) == len(filters) == len(strides)
n_layers = len(out_chs)
for i in range(n_layers):
    # Per-layer kernel size and stride come from the lists above.
    layer = convolution2d('conv_{}'.format(i), layer, out_chs[i],
                          k=filters[i], s=strides[i])
    if i in pool_indices:
        # Max-pool after the selected layers.
        layer = max_pool('maxPool_{}'.format(i), layer)

top_conv = tf.identity(layer, 'top_conv')
logits = ram('ram', top_conv)

# Build Optimizer
pred, pred_cls, cost, train_op, correct_pred, accuracy = \
    algorithm(y_conv=logits, y_=y_, learning_rate=lr, optimizer='sgd',
              use_l2_loss=True, activation='sigmoid', cost_func='mse')

sess = tf.Session()
init = tf.group(tf.global_variables_initializer(),
                tf.local_variables_initializer())
sess.run(init)
max_step = 10000
ckpt_point = 100
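
A sketch of the training loop that max_step and ckpt_point suggest, assuming the MNIST arrays and batch_size from code example #5 and numpy for mini-batch sampling; the actual loop in RAM.py may differ:

import numpy as np

saver = tf.train.Saver()
for step in range(max_step):
    # Hypothetical mini-batch sampling; RAM.py's own batching may differ.
    idx = np.random.choice(len(train_imgs), batch_size, replace=False)
    feed = {x_: train_imgs[idx], y_: train_labs[idx], phase_train: True}
    _, train_cost = sess.run([train_op, cost], feed_dict=feed)

    if step % ckpt_point == 0:
        acc = sess.run(accuracy, feed_dict={x_: test_imgs,
                                            y_: test_labs,
                                            phase_train: False})
        print('step :', step, 'cost :', train_cost, 'accuracy :', acc)
        saver.save(sess, './model/ckpt', global_step=step)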