Example No. 1
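These are TensorFlow 1.x layer helpers. Each snippet assumes `import tensorflow as tf`, the local helpers `_variable_with_weight_decay` and `_variable_on_cpu`, a summary module `sm`, and (for Example No. 4) a `batch_norm` function matching `tf.contrib.layers.batch_norm`.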
def deconv(data,
           output_shape,
           scope,
           shape,
           stride=[1, 2, 2, 1],
           padding='SAME',
           wd=0.0,
           FLOAT16=False,
           reuse=None):
    """Transposed convolution (deconvolution) layer: weight-decayed kernel, bias add, activation summary."""
    with tf.variable_scope(scope, 'DeConv', [data], reuse=reuse):
        STDdev = 1 / tf.sqrt(
            shape[0] * shape[1] * shape[2] / 2)  # Xavier/2 initialization
        kernel = _variable_with_weight_decay('weights',
                                             shape=shape,
                                             stddev=STDdev,
                                             wd=wd,
                                             FLOAT16=FLOAT16)
        deconv = tf.nn.conv2d_transpose(data,
                                        kernel,
                                        output_shape,
                                        stride,
                                        padding=padding)
        biases = _variable_on_cpu(
            'biases', [shape[2]],
            tf.constant_initializer(0.00001))  # positive biases
        pre_activation = tf.nn.bias_add(deconv, biases)
        sm._activation_summary(pre_activation)
    return pre_activation
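A minimal usage sketch for the layer above (the input tensor, shapes, and scope name are illustrative assumptions, not part of the original code):

# Hypothetical call: upsample a [batch, 16, 16, 64] feature map to [batch, 32, 32, 32].
# For tf.nn.conv2d_transpose the kernel shape is [height, width, output_channels, input_channels].
upsampled = deconv(features,
                   output_shape=[batch_size, 32, 32, 32],
                   scope='deconv1',
                   shape=[4, 4, 32, 64])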
Example No. 2
def fclayer(data, batch_size, hidden, scope, wd=0.0, FLOAT16=False, reuse=None):
    """Fully connected layer: flattens the input, then a single matrix multiply plus bias."""
    with tf.variable_scope(scope, 'fc', [data], reuse=reuse):
        # Move everything into depth so we can perform a single matrix multiply.
        reshape = tf.reshape(data, [batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = _variable_with_weight_decay('weights',
                                              shape=[dim, hidden],
                                              stddev=0.04,
                                              wd=wd,
                                              FLOAT16=FLOAT16)
        biases = _variable_on_cpu('biases', [hidden],
                                  tf.constant_initializer(0.00001))
        pre_activation = tf.matmul(reshape, weights) + biases
        sm._activation_summary(pre_activation)
    return pre_activation
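A minimal usage sketch (tensor names and the hidden size are assumptions for illustration):

# Hypothetical call: flatten `features` and project it to 512 hidden units.
fc1 = fclayer(features, batch_size=batch_size, hidden=512, scope='fc1', wd=0.004)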
Example No. 3
def dilated_conv(data, scope, shape, rate=1, padding='SAME', wd=0.0, FLOAT16=False, reuse=None):
    """Dilated (atrous) convolution layer: weight-decayed kernel, bias add, activation summary."""
    with tf.variable_scope(scope, 'Dilated_Conv', [data], reuse=reuse):
        STDdev = 1 / tf.sqrt(
            shape[0] * shape[1] * shape[2] / 2)  # Xavier/2 initialization
        kernel = _variable_with_weight_decay('weights',
                                             shape=shape,
                                             stddev=STDdev,
                                             wd=wd,
                                             FLOAT16=FLOAT16)
        conv = tf.nn.atrous_conv2d(data, kernel, rate, padding=padding)
        biases = _variable_on_cpu('biases', [shape[3]],
                                  tf.constant_initializer(0.0001))  # positive biases
        pre_activation = tf.nn.bias_add(conv, biases)
        sm._activation_summary(pre_activation)
    return pre_activation
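A minimal usage sketch (channel counts and the dilation rate are assumed for illustration):

# Hypothetical call: 3x3 dilated convolution, 64 -> 128 channels, dilation rate 2.
# For tf.nn.atrous_conv2d the kernel shape is [height, width, in_channels, out_channels].
out = dilated_conv(features, scope='dilated1', shape=[3, 3, 64, 128], rate=2)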
Example No. 4
def batch_norm_layer(x, train_phase, scope_bn, reuse=None):
    """Batch normalization wrapper around tf.contrib.layers.batch_norm, with an activation summary."""
    with tf.variable_scope(scope_bn, 'BatchNorm', [x], reuse=reuse):
        z = batch_norm(x,
                       decay=0.999,
                       fused=False,
                       center=True,
                       scale=True,
                       is_training=train_phase,
                       reuse=reuse,
                       trainable=True,
                       scope=scope_bn,
                       updates_collections=None,
                       variables_collections=[
                           "batch_norm_non_trainable_variables_collection"
                       ])
        sm._activation_summary(z)
        return z
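A minimal usage sketch (the tensor and the `is_training` flag are assumed for illustration):

# Hypothetical call: normalize a convolution output; `is_training` switches between
# batch statistics (training) and the stored moving averages (inference).
normed = batch_norm_layer(conv_out, train_phase=is_training, scope_bn='bn1')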
Example No. 5
def leakyReLU(x, leak, scope, reuse=None):
    with tf.variable_scope(scope, 'leakyReLU', [x], reuse=reuse):
        leakyrelu = tf.nn.leaky_relu(x, alpha=leak)
        sm._activation_summary(leakyrelu)
        return leakyrelu
Example No. 6
def ReLU(x, scope, reuse=None):
    with tf.variable_scope(scope, 'ReLU', [x], reuse=reuse):
        relu = tf.nn.relu(x)
        sm._activation_summary(relu)
        return relu
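A minimal sketch chaining the helpers above (all tensor and scope names are assumed for illustration):

# Hypothetical pipeline: dilated conv -> batch norm -> leaky ReLU on an RGB input.
net = dilated_conv(images, scope='conv1', shape=[3, 3, 3, 64], rate=1)
net = batch_norm_layer(net, train_phase=is_training, scope_bn='conv1_bn')
net = leakyReLU(net, leak=0.1, scope='conv1_lrelu')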