Example no. 1
    def loss(self, output, l_out):
        with tf.variable_scope('loss') as scope:
            # mask: 1.0 where `output` is non-zero, 0.0 elsewhere
            nonzero_bool = tf.not_equal(output, tf.constant(0, tf.float32))
            nonzero_mat = tf.cast(nonzero_bool, tf.float32)
            # keep only the entries of l_out at positions where output is non-zero
            l_out_nonzero = tf.multiply(l_out, nonzero_mat)
            # mean squared error between the masked l_out and output
            squared_error = tf.square(tf.subtract(l_out_nonzero, output))
            cost = tf.reduce_mean(squared_error, name=scope.name)

        return cost
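A standalone sketch of the masking idea used in the loss above; the tensor values are illustrative, not from the original code:

# Positions where `output` is zero contribute nothing to the squared error.
output = tf.constant([[1.0, 0.0, 2.0]])
l_out = tf.constant([[0.5, 3.0, 2.5]])
mask = tf.cast(tf.not_equal(output, 0.0), tf.float32)
err = tf.square(l_out * mask - output)  # -> [[0.25, 0.0, 0.25]]; the 3.0 is masked out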
def batch_norm_template(inputs, is_training, scope, moments_dims, bn_decay):
	'''
	Args:
	inputs: Tensor 4D, BHWC input maps
	is_training : boolean tf.Variable, true indicates the training phase
	bn_decay : float or float tensor variable, controlling the moving average weight
	scope: string, variable scope
	moments_dims: a list of ints, indicating dimensions for moments calculation
	Return:
	normed: batch normalized maps
	'''
	with tf.variable_scope(scope) as sc:
		num_channels = inputs.get_shape()[-1].value
		beta = tf.Variable(tf.constant(0.0, shape=[num_channels]), 
			name = 'beta',
			trainable = True
			)
		gamma = tf.Variable(tf.constant(1.0, shape=[num_channels]),
			name='gamma',
			trainable = True
			)
		batch_mean, batch_var = tf.nn.moments(inputs, moments_dims, name='moments')
		decay = bn_decay if bn_decay is not None else 0.9
		ema = tf.train.ExponentialMovingAverage(decay=decay)

		#operator that maintains the moving averages of variables
		ema_apply_op = tf.cond(is_training,
			lambda:ema.apply([batch_mean, batch_var]),
			lambda:tf.no_op())

		def mean_var_with_update():
			with tf.control_dependencies([ema_apply_op]):
				return tf.identity(batch_mean), tf.identity(batch_var)

		#ema.average returns the Variable holding the average of var
		mean, var = tf.cond(is_training,
			mean_var_with_update,
			lambda: (ema.average(batch_mean), ema.average(batch_var)))
		normed = tf.nn.batch_normalization(inputs, mean, var, beta, gamma, 1e-3)
	return normed
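A minimal usage sketch for batch_norm_template (TF 1.x graph mode; the placeholder shape, scope name and decay value are illustrative assumptions):

# Batch-normalize a conv feature map over batch, height and width
inputs = tf.placeholder(tf.float32, [None, 32, 32, 64])
is_training = tf.placeholder(tf.bool, shape=[])
normed = batch_norm_template(inputs, is_training, 'bn1', [0, 1, 2], bn_decay=0.9)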
Example no. 3
        x = tf.map_fn(lambda img: tf.image.random_flip_left_right(img), x)
        return x

    XX = tf.cond(is_training, lambda: aug_image(X), lambda: X)

    logits = cnn(XX, is_training=is_training)

    clf_loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=Y))

    reg_loss_list = []
    for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES):
        if re.search('weights', v.name):
            reg_loss_list.append(l2(v))
            logging.info('Apply {} for {}'.format(l2.__name__, v.name))
    reg_loss = tf.add_n(reg_loss_list) if reg_loss_list else tf.constant(
        0.0, dtype=tf.float32)

    loss = clf_loss + reg_loss

    coords_check = tf.get_collection('70f92c137c01d89c6477c5ef22411bfe')
    coords_w = coords_check[0][0]
    coords_h = coords_check[0][1]

    grid_weights = []
    for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES):
        if re.search('GRID', v.name):
            grid_weights.append(v)
    convbn_weights = [
        v for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)
        if v not in grid_weights
    ]
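The snippet above calls an `l2` regularizer that is not defined in this excerpt; a minimal sketch of one possible definition (the weight-decay factor and the helper itself are assumptions, not from the original code):

import re
import logging
import tensorflow as tf

weight_decay = 5e-4  # illustrative value

def l2(v):
    # hypothetical helper: L2 penalty on a weight variable
    return weight_decay * tf.nn.l2_loss(v)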
Example no. 4
def inference(images):
    parameters = []
    # the first convolution layer
    # name_scope works like a C++ namespace for op names
    with tf.name_scope('conv1') as scope:
        # 11x11 kernels, 3 input channels (RGB), 64 output channels
        kernel = tf.Variable(tf.truncated_normal([11, 11, 3, 64], dtype=tf.float32, stddev=1e-1), name='weights')
        # conv shape = [batch_size, ceil(height/4), ceil(width/4), 64]
        # https://www.cnblogs.com/lovepysics/p/7220111.html  hint: how to compute a convolution layer's output size
        conv = tf.nn.conv2d(images, kernel, [1, 4, 4, 1], padding='SAME')
        # biases: a length-64 vector of zeros, one per output channel
        biases = tf.Variable(tf.constant(0.0, shape=[64], dtype=tf.float32), trainable=True, name='bias')
        # output shape = conv shape
        bias = tf.nn.bias_add(conv, biases)
        # output shape = bias shape
        conv1 = tf.nn.relu(bias, name=scope)
        print_activations(conv1)
        parameters += [kernel, biases]
        # lrn1 shape = conv1 shape
        # local response normalization with AlexNet's settings:
        # depth_radius=4, bias=1.0, alpha=0.001/9, beta=0.75
        lrn1 = tf.nn.lrn(conv1, 4, bias=1.0, alpha=0.001 / 9, beta=0.75, name='lrn1')
        # pool1 shape = [batch_size, (height-3)/2+1, (width-3)/2+1, 64]
        pool1 = tf.nn.max_pool(lrn1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1')
        print_activations(pool1)
    with tf.name_scope('conv2') as scope:
        kernel = tf.Variable(tf.truncated_normal([5, 5, 64, 192], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(pool1, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[192], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activations(conv2)
        lrn2 = tf.nn.lrn(conv2, 4, bias=1.0, alpha=0.001 / 9, beta=0.75, name='lrn2')
        pool2 = tf.nn.max_pool(lrn2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool2')
        print_activations(pool2)
    with tf.name_scope('conv3') as scope:
        kernel = tf.Variable(tf.truncated_normal([3, 3, 192, 384], dtype=tf.float32, stddev=1e-1), name='weights')
        conv = tf.nn.conv2d(pool2, kernel, [1, 1, 1, 1], padding='SAME')
        biases = tf.Variable(tf.constant(0.0, shape=[384], dtype=tf.float32), trainable=True, name='biases')
        bias = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(bias, name=scope)
        parameters += [kernel, biases]
        print_activations(conv3)
    with tf.name_scope('conv4') as scope:
        kernel = tf.Variable(tf.truncated_normal([3,3,384,256],dtype=tf.float32,stddev=1e-1),name='weights')
        conv = tf.nn.conv2d(conv3,kernel,[1,1,1,1],padding='SAME')
        biases = tf.Variable(tf.constant(0.0,shape=[256],dtype=tf.float32),trainable=True,name='biases')
        bias = tf.nn.bias_add(conv,biases)
        conv4 = tf.nn.relu(bias,name=scope)
        parameters += [kernel,biases]
        print_activations(conv4)
    with tf.name_scope('conv5') as scope:
        kernel = tf.Variable(tf.truncated_normal([3,3,256,256],dtype=tf.float32,stddev=1e-1),name='weights')
        conv = tf.nn.conv2d(conv4,kernel,[1,1,1,1],padding="SAME")
        biases = tf.Variable(tf.constant(0.0,shape=[256],dtype=tf.float32),trainable=True,name='biases')
        bias = tf.nn.bias_add(conv,biases)
        conv5 = tf.nn.relu(bias,name=scope)
        parameters +=[kernel,biases]
        print_activations(conv5)
        pool5 = tf.nn.max_pool(conv5,ksize=[1,3,3,1],strides=[1,2,2,1],padding="VALID",name='pool5')
        print_activations(pool5)
    return pool5, parameters
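A minimal sketch of driving the inference graph above; the batch/image sizes are illustrative, and `print_activations` (used inside inference but missing from this excerpt) is sketched here as an assumption:

def print_activations(t):
    # assumed helper: report an op's name and output shape
    print(t.op.name, t.get_shape().as_list())

images = tf.placeholder(tf.float32, [32, 224, 224, 3])
pool5, parameters = inference(images)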
import tensorflow as tf
hello = tf.constant('hello')
sess = tf.Session()
print(sess.run(hello))

import cv2
print('openCV')

# Compression ratios for different image formats
import cv2
# read_type: 1 means read as a color image
img = cv2.imread(file_name, read_type)

# IMWRITE_JPEG_QUALITY controls JPEG quality,
# numbers: 0~100
# JPEG quality is adjustable, i.e. lossy compression; transparency is not supported
# PNG is lossless compression and also supports transparency
cv2.imwrite(file_name, img, [cv2.IMWRITE_JPEG_QUALITY, numbers])
# numbers: 0~9; the smaller the value, the weaker the compression
cv2.imwrite(file_name, img, [cv2.IMWRITE_PNG_COMPRESSION, numbers])
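A concrete sketch of the two write calls, with illustrative file names and values (not from the original notes):

img = cv2.imread('input.jpg', 1)                                  # read as a color image
cv2.imwrite('out_q50.jpg', img, [cv2.IMWRITE_JPEG_QUALITY, 50])   # lossy JPEG, quality 50/100
cv2.imwrite('out_c9.png', img, [cv2.IMWRITE_PNG_COMPRESSION, 9])  # lossless PNG, max compression level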

## Pixel access
img = cv2.imread(file_name, 1)
# the pixel at row 100, column 100 (B, G, R order)
(b,g,r) = img[100,100]

# TensorFlow basics
data1 = tf.constant(2.5)
data2 = tf.Variable(10, name='var')
data3 = tf.constant(2.5, dtype=tf.int32)
print(data1)
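Printing data1 only shows the tensor's metadata; a minimal sketch (assuming a TF 1.x session) of evaluating the values defined above:

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # data2 is a Variable and needs initialization
    print(sess.run(data1))  # 2.5
    print(sess.run(data2))  # 10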
Example no. 6
      [1,0,0]
      [0,1,0]]
'''

# scatter: selectively update values at given coordinates
'''
    tf.scatter_nd(
    indices,      coordinates at which the base tensor's values are changed
    updates,      the new values written at those coordinates
    shape)        shape of the base tensor (1-D, 2-D or higher); all values default to 0
'''
indices = tf.constant([[4], [3], [1], [7]])
updates = tf.constant([9, 10, 11, 12])
# index 4 of the base tensor becomes 9, index 3 becomes 10, index 1 becomes 11, index 7 becomes 12
shape = tf.constant([8])  # the base tensor is 1-D, all zeros, length 8
tf.scatter_nd(indices, updates, shape)
# returns [0, 11, 0, 10, 9, 0, 0, 12]
# tf.scatter_nd can only fill positions of the all-zero base tensor
# scatter usage is complicated; see lessons 47 and 48 for details

# meshgrid: used in lesson 48, hard to describe in writing, remember to review it
'''
Given ranges for x and y, generate the grid coordinates [x1,y1], [x2,y2], [x3,y3], ... covering those ranges
'''
y = tf.linspace(-2., 2., 5)  # five evenly spaced values from -2. to 2.
x = tf.linspace(-2., 2., 5)
point_x, point_y = tf.meshgrid(x, y)
point_x.shape  # [5, 5], 25 values in total; within each column the values are equal
point_y.shape  # [5, 5], 25 values in total; within each row the values are equal
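To turn the two grids into the coordinate pairs described above, one option (an illustrative sketch, not from the original notes) is to stack and flatten them:

points = tf.stack([point_x, point_y], axis=-1)  # shape [5, 5, 2], one (x, y) pair per grid cell
points = tf.reshape(points, [-1, 2])            # 25 coordinate pairs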
Example no. 7
def start(x_in, y_in, signal2model):
    E = np.random.uniform(-np.sqrt(1. / signal2model.signal_dim), np.sqrt(1. / signal2model.signal_dim),
                          (signal2model.hidden_dim, signal2model.signal_dim))
    U = np.random.uniform(-np.sqrt(1. / signal2model.hidden_dim), np.sqrt(1. / signal2model.hidden_dim),
                          (signal2model.hidden_dim, signal2model.hidden_dim))
    W = np.random.uniform(-np.sqrt(1. / signal2model.hidden_dim), np.sqrt(1. / signal2model.hidden_dim),
                          (signal2model.hidden_dim, signal2model.hidden_dim))
    V = np.random.uniform(-np.sqrt(1. / signal2model.hidden_dim), np.sqrt(1. / signal2model.hidden_dim),
                          (signal2model.signal_dim, signal2model.hidden_dim))

    b = np.zeros((signal2model.hidden_dim))
    c = np.zeros((signal2model.signal_dim))


    E = tf.Variable(E, name="E")
    U = tf.Variable(U, name="U")
    W = tf.Variable(W, name="W")
    V = tf.Variable(V, name="V")
    b = tf.Variable(b, name="b")
    c = tf.Variable(c, name="c")

    x = tf.constant(x_in)
    y = tf.constant(y_in)

    var = tf.global_variables_initializer()


    conversion_ones = tf.ones((signal2model.mini_batch_size, 1))

    def GRU(i, U, W, b, x_0, s_previous):
        U_copy, W_copy = U, W
        b1 = tf.matmul(conversion_ones, b[i * 3, :])
        b2 = tf.matmul(conversion_ones, b[i * 3 + 1, :])
        b3 = tf.matmul(conversion_ones, b[i * 3 + 2, :])

        z = tf.sigmoid(tf.add(tf.add(tf.matmul(U[i * 3 + 0], x_0), tf.matmul(W[i * 3 + 0],s_previous)), b1))
        r = tf.sigmoid(tf.add(tf.add(tf.matmul(U[i * 3 + 1], x_0), tf.matmul(W[i * 3 + 1], s_previous)), b2))
        s_candidate = tf.tanh(tf.add(tf.add(tf.matmul(U_copy[i * 3 + 2], x_0),
                                            tf.matmul(W[i * 3 + 2], tf.matmul(s_previous, r))),
                                     b3))

        return tf.add(tf.matmul(tf.ones_like(z) - z, s_candidate), tf.matmul(z, s_previous))


    def forward_prop_step(x_t, s_prev):
        # Embedding layer
        x_0 = E[:, x_t[:, 0]]
        s = tf.zeros_like(s_prev)

        # GRU BLOCK 1 [1 vs 1] #############
        # GRU Layer 1
        # print(x_e0)
        # print(s_prev)

        s[0] = GRU(0, U, W, b, x_0, s_prev[0])
        s[1] = GRU(1, U, W, b, s[0], s_prev[1])
        s[2] = GRU(2, U, W, b, s[1], s_prev[2])


        # Final output calculation
        # FIRST DIMENSION: one softmax per output dimension
        # (translated from the original Theano expression T.stack / T.nnet.softmax)
        o_t = tf.stack([
            tf.nn.softmax(tf.matmul(V[0], s[:, 0, 1]) + tf.matmul(conversion_ones, c[0])),
            tf.nn.softmax(tf.matmul(V[1], s[:, 1, 1]) + tf.matmul(conversion_ones, c[1])),
            tf.nn.softmax(tf.matmul(V[2], s[:, 2, 1]) + tf.matmul(conversion_ones, c[2])),
            tf.nn.softmax(tf.matmul(V[3], s[:, 3, 1]) + tf.matmul(conversion_ones, c[3]))
        ])

        return [o_t, s]
Example no. 8
import tensorflow as tf

### Begin building the graph ###

# create a constant op: a 1 x 2 matrix
matrix1 = tf.constant([[3., 3.]])
# create another constant op: a 2 x 1 matrix
matrix2 = tf.constant([[2.], [4.]])

# create a matrix multiplication op
product = tf.matmul(matrix1, matrix2)

### Done building the graph ###

### Run the graph by creating a Session
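A minimal sketch of the run step announced by the comment above (TF 1.x graph mode):

with tf.Session() as sess:
    result = sess.run(product)
    print(result)  # [[18.]] since 3*2 + 3*4 = 18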