Code Example #1
def routing_as_recon(pose, is_train):
    batch_size = int(pose.get_shape()[0])
    caps_num_in = int(pose.get_shape()[2])
    caps_num_out = int(pose.get_shape()[1])

    # calculate y_j given all r_ji = 1 as the first step of iteration 0
    y = tf.reduce_sum(pose, axis=2, keepdims=True)

    for i in range(cfg.iter_routing - 1):
        y = y / tf.norm(y, axis=-1, keepdims=True)

        r = tf.reduce_sum(pose * y, axis=-1, keepdims=True) / 2

        y = tf.reduce_sum(pose * r, axis=2, keepdims=True) / tf.reduce_sum(
            tf.square(r), axis=2, keepdims=True)

    k = slim.variable('sc_k',
                      shape=[caps_num_out, 1, 16],
                      dtype=tf.float32,
                      initializer=tf.truncated_normal_initializer(mean=0.0,
                                                                  stddev=0.01),
                      trainable=is_train)
    b = slim.variable('sc_b',
                      shape=[caps_num_out, 1, 1],
                      dtype=tf.float32,
                      initializer=tf.constant_initializer(0.01),
                      trainable=is_train)
    activation_out = tf.reduce_sum(k * y, axis=-1, keepdims=True) + b

    output = tf.squeeze(y)
    activation_out = tf.squeeze(activation_out)
    return output, activation_out
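A minimal usage sketch (not from the source; it assumes cfg.iter_routing is configured and that pose follows the [batch, caps_out, caps_in, 16] layout implied by the shape reads above):

pose = tf.placeholder(tf.float32, [32, 10, 72, 16])   # votes grouped per output capsule
with tf.variable_scope('recon_caps'):
    out_pose, out_act = routing_as_recon(pose, is_train=True)
# out_pose: [32, 10, 16] capsule poses, out_act: [32, 10] activations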
Code Example #2
def mat_transform(input, caps_num_c, regularizer, tag=False):
    batch_size = int(input.get_shape()[0])
    caps_num_i = int(input.get_shape()[1])
    width_out = int(input.get_shape()[2])

    # the output of capsule is miu, the mean of a Gaussian, and activation, the sum of probabilities
    # it has no relationship with the absolute values of w and votes
    # using weights with bigger stddev helps numerical stability
    if tag:
        output = tf.reshape(
            input, shape=[batch_size, caps_num_i * width_out, 1, 4, 4])
        w = slim.variable('w', shape=[1, width_out, caps_num_c, 4, 4])
        w = tf.tile(w, [batch_size, caps_num_i, 1, 1, 1])
    else:
        output = tf.reshape(input,
                            shape=[batch_size, caps_num_i, width_out, 4, 4])
        w = slim.variable('w',
                          shape=[1, caps_num_i, caps_num_c * width_out, 4, 4],
                          dtype=tf.float32,
                          initializer=tf.truncated_normal_initializer(
                              mean=0.0, stddev=1.0),
                          regularizer=regularizer)

        w = tf.tile(w, [batch_size, 1, 1, 1, 1])

    output = tf.tile(output, [1, 1, caps_num_c, 1, 1])
    if tag:
        output = tf.reshape(output, [batch_size, -1, caps_num_c, 4, 4])
        votes = tf.reshape(tf.matmul(output, w),
                           [batch_size, -1, caps_num_c, 16])
    else:
        output = tf.reshape(output, [batch_size, caps_num_i, -1, 4, 4])
        votes = tf.reshape(tf.matmul(
            output, w), [batch_size, caps_num_i, caps_num_c * width_out, 16])
    return votes
Code Example #3
 def LinearProj_mid(self, phi_cross, phi_T_y, shrinkage, multiplier, stage):
     '''
     Input Argument:
         [batch, height, width, depth] phi_T_y: One step reconstruction initialization
         [height, width, depth, depth] phi_cross: phi_T_Phi the inner product of each tube
     Return:
         [batch, height, width, depth] Reconstruction Result
     '''
     gamma, rho = [], []
     with tf.variable_scope('LinearProj_%d' % (stage), reuse=None):
         with slim.arg_scope(
             [slim.variable],
                 dtype=tf.float32,
                 initializer=slim.initializers.xavier_initializer(),
                 regularizer=slim.l2_regularizer(0.0),
                 trainable=self.is_training):
             pattern_aggr = phi_T_y
             for ind_pattern in range(self.num_pattern):
                 rho.append(
                     slim.variable(name='rho_%d' % (ind_pattern), shape=[]))
                 gamma.append(
                     slim.variable(name='gamma_%d' % (ind_pattern),
                                   shape=[1, 1, self.ratio, self.ratio]))
                 auxli_v = shrinkage[ind_pattern] - multiplier[ind_pattern]
                 auxli_v = rho[-1] * slim.fully_connected(
                     auxli_v,
                     self.ratio,
                     scope='LiPro_%d_1' % (ind_pattern))
                 auxli_v = slim.fully_connected(auxli_v,
                                                self.ratio,
                                                activation_fn=None,
                                                scope='LiPro_%d_End' %
                                                (ind_pattern))
                 pattern_aggr += auxli_v
             return self.Sparse_Inverse(phi_cross, rho, gamma, pattern_aggr)
Code Example #4
def batch_normalization(tensor_in,
                        epsilon=1e-10,
                        decay=0.9,
                        variables_collections=None,
                        outputs_collections=None,
                        reuse=None,
                        scope=None):
    """Element-wise batch normalization. This is only the first
    half of the typical batch normalization calculation
    (standardization by the batch mean and variance).
        u = (u_pre - mean) / sqrt(variance + epsilon)
    """
    with tf.variable_scope(scope, 'batch_normalization', [tensor_in],
                           reuse=reuse) as sc:
        tensor_in = tf.convert_to_tensor(tensor_in)
        input_shape = tensor_in.get_shape().as_list()
        input_ndim = len(input_shape)
        axis = list(range(input_ndim - 1))

        moving_mean_collections = layers.utils.get_variable_collections(
            variables_collections, 'moving_mean')
        moving_mean = slim.variable('moving_mean',
                                    shape=input_shape[-1:],
                                    initializer=tf.zeros_initializer,
                                    collections=moving_mean_collections,
                                    trainable=False)
        moving_variance_collections = layers.utils.get_variable_collections(
            variables_collections, "moving_variance")
        moving_variance = slim.variable('moving_variance',
                                        shape=input_shape[-1:],
                                        initializer=tf.constant_initializer(1.),
                                        collections=moving_variance_collections,
                                        trainable=False)

        def update_mean_var():
            mean, variance = tf.nn.moments(tensor_in, axis, name='moments')
            update_moving_mean = moving_averages.assign_moving_average(
                moving_mean, mean, decay)
            update_moving_variance = moving_averages.assign_moving_average(
                moving_variance, variance, decay)
            with tf.control_dependencies([update_moving_mean,
                                          update_moving_variance]):
                return tf.identity(mean), tf.identity(variance)

        is_training = training_ops.get_training_mode()
        mean, variance = control_flow_ops.cond(
                is_training,
                update_mean_var,
                lambda: (moving_mean, moving_variance))

        layers.utils.collect_named_outputs(
            outputs_collections, sc.name + '/mean', mean)

        layers.utils.collect_named_outputs(
            outputs_collections, sc.name + '/variance', variance)

        # actually apply the normalization
        variance_epsilon = tensor_utils.to_tensor(epsilon, tensor_in.dtype.base_dtype)
        return (tensor_in - mean) * tf.rsqrt(variance + variance_epsilon)
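At inference time the cond above falls back to moving_mean / moving_variance, which assign_moving_average maintains as an exponential moving average. A minimal sketch of that update rule (an illustration only, ignoring TensorFlow's optional zero-debiasing):

def ema_update(moving, value, decay=0.9):
    # moving <- decay * moving + (1 - decay) * value
    return decay * moving + (1.0 - decay) * value

# e.g. a batch mean of 2.0 pulls a moving mean of 0.0 up to 0.2 in one step
print(ema_update(0.0, 2.0))  # 0.2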
Code Example #5
def em_routing(pose, activation, is_train):
    batch_size = int(pose.get_shape()[0])
    caps_num_in = int(pose.get_shape()[2])
    caps_num_out = int(pose.get_shape()[1])

    r = tf.constant(
        np.ones([batch_size, caps_num_out, caps_num_in], dtype=np.float32) /
        caps_num_out)
    activation = tf.reshape(activation, [batch_size, 1, -1])

    beta_v = slim.variable('beta_v',
                           shape=[caps_num_out, 1, 16],
                           dtype=tf.float32,
                           initializer=tf.truncated_normal_initializer(
                               mean=0.0, stddev=0.01),
                           trainable=is_train)
    beta_a = slim.variable('beta_a',
                           shape=[caps_num_out, 1, 1],
                           dtype=tf.float32,
                           initializer=tf.constant_initializer(0.01),
                           trainable=is_train)

    for i in range(cfg.iter_routing):
        # m-step
        r1 = tf.reshape(r * activation,
                        [batch_size, caps_num_out, caps_num_in, 1])
        r1_sum = tf.reduce_sum(r1, axis=2, keepdims=True)
        r1 = r1 / (r1_sum + cfg.epsilon)

        miu = tf.reduce_sum(
            r1 * pose, axis=2,
            keepdims=True)  #batch_size, caps_num_out, caps_num_in=1, 16
        sigma_square = tf.reduce_sum(
            r1 * tf.square(pose - miu), axis=2,
            keepdims=True)  #batch_size, caps_num_out, caps_num_in=1, 16

        activation_out = tf.reduce_sum(
            beta_v + tf.log(tf.sqrt(sigma_square)), axis=-1, keepdims=True
        ) * r1_sum  #batch_size, caps_num_out, caps_num_in=1, 1
        activation_out = tf.nn.sigmoid(
            np.power(10.0, i - 3.0) *
            (beta_a -
             activation_out))  #batch_size, caps_num_out, caps_num_in=1, 1

        # e_step
        if i < cfg.iter_routing - 1:
            log_p = -tf.log(tf.sqrt(sigma_square)) - tf.square(pose - miu) / (
                2 * sigma_square)
            log_p = log_p - (tf.reduce_max(log_p, axis=[1, 3], keepdims=True) -
                             tf.log(10.0))
            p = tf.exp(tf.reduce_sum(log_p, -1, keepdims=True))

            r = activation_out * p
            r = tf.squeeze(
                r / (tf.reduce_sum(r, axis=1, keepdims=True) + cfg.epsilon))

    pose = tf.squeeze(miu)  #batch_size, caps_num_out, 16
    activation = tf.squeeze(activation_out)  #batch_size, caps_num_out
    return pose, activation
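A minimal usage sketch (not from the source; it assumes cfg.iter_routing and cfg.epsilon are set and that the inputs follow the shape comments above):

pose = tf.placeholder(tf.float32, [32, 10, 72, 16])   # [batch, caps_out, caps_in, 16] votes
activation = tf.placeholder(tf.float32, [32, 72])     # [batch, caps_in] input activations
with tf.variable_scope('class_caps'):
    out_pose, out_act = em_routing(pose, activation, is_train=True)
# out_pose: [32, 10, 16], out_act: [32, 10]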
Code Example #6
File: network.py Project: douzeyang/fill_network
    def build_net(self):
        input_weights = slim.variable(
            'input_weights',
            shape=[int(self.input_shape),
                   int(self.in_shape)],
            initializer=tf.truncated_normal_initializer(stddev=0.1),
            regularizer=slim.l2_regularizer(0.005),
        )
        encode1 = mask_matmul(self.input, self.mask_encode, input_weights,
                              self.batch_size)
        input_bias = slim.variable(
            'input_bias',
            shape=int(self.in_shape),
            initializer=tf.truncated_normal_initializer(stddev=0.1),
            regularizer=slim.l2_regularizer(0.005))
        encode1 = tf.nn.bias_add(encode1, input_bias)
        encode1 = tf.nn.dropout(encode1, keep_prob=self.keep_prob)
        encode1 = tf.nn.elu(encode1)
        encode2 = slim.fully_connected(encode1,
                                       1024,
                                       activation_fn=slim.nn.elu,
                                       scope='fc2')

        encode3 = slim.fully_connected(encode2,
                                       512,
                                       activation_fn=slim.nn.elu,
                                       scope='fc3')
        encode4 = slim.fully_connected(encode3,
                                       64,
                                       activation_fn=slim.nn.elu,
                                       scope='fc4')
        encode4 = tf.nn.dropout(encode4, keep_prob=self.keep_prob)
        decode3 = slim.fully_connected(
            encode4, 512, activation_fn=slim.nn.elu, scope='fc5') + encode3

        decode2 = slim.fully_connected(
            decode3, 1024, activation_fn=slim.nn.elu, scope='fc6') + encode2
        output_weights = slim.variable(
            'output_weights',
            shape=[self.out_shape, self.output_shape],
            initializer=tf.truncated_normal_initializer(stddev=0.1),
            regularizer=slim.l2_regularizer(0.005),
        )
        # mask_output_weights = output_weights*self.mask_decode
        decode2 = tf.nn.dropout(decode2, keep_prob=self.keep_prob)
        net = tf.matmul(decode2, output_weights) * self.mask_decode
        # net = tf.nn.sigmoid(net)
        return net


# if __name__ == "__main__":
#     network = fill_net()
#     net = network.build_net()
#     print(1)
Code Example #7
    def _fully_connected(self, x, out_dim):
        """FullyConnected layer for final output."""

        # x = tf.reshape(x, [x.get_shape()[0].value, -1])
        x = flatten(x)
        w = slim.variable(
            'DW', [x.get_shape()[1], out_dim],
            initializer=tf.uniform_unit_scaling_initializer(factor=1.0))
        b = slim.variable('biases', [out_dim],
                          initializer=tf.constant_initializer(),
                          regularizer=None)
        return tf.nn.xw_plus_b(x, w, b)
Code Example #8
    def LinearProj_mid(self, measurement, phi_T_E, mask, linearer, shrinkage,
                       multiplier1, multiplier2, stage):
        '''
        Input Argument:
            [batch, height, width, depth] phi_T_y: One step reconstruction initialization
            [height, width, depth, depth] phi_cross: phi_T_Phi the inner product of each tube
        Return:
            [batch, height, width, depth] Reconstruction Result
        '''
        gamma, rho, lamda, result2, multipliernew1, multipliernew2, multiplier = [], [], [], [], [], [], []
        with tf.variable_scope('LinearProj_%d' % (stage), reuse=None):
            with slim.arg_scope(
                [slim.variable],
                    dtype=tf.float32,
                    initializer=slim.initializers.xavier_initializer(),
                    regularizer=slim.l2_regularizer(0.0),
                    trainable=self.is_training):

                auxli_v1 = 0

                for ind_pattern in range(self.num_pattern):
                    gamma.append(
                        slim.variable(name='gamma_%d' % (ind_pattern),
                                      shape=[]))

                    multiplier = multiplier1[ind_pattern] - gamma[
                        ind_pattern] * (linearer - shrinkage[ind_pattern])
                    aux_mid = linearer - shrinkage[
                        ind_pattern] - multiplier / gamma[ind_pattern]
                    #aux_mid = slim.fully_connected(aux_mid ,self.ratio,scope='LiPro_%d_1'%(ind_pattern))
                    auxli_v1 += gamma[ind_pattern] * aux_mid
                    multipliernew1.append(multiplier)

                rho = slim.variable(name='rho_%d' % (1), shape=[])
                lamda = slim.variable(name='lamda_%d' % (1), shape=[])
                phi_I = tf.reduce_sum(tf.multiply(mask, linearer),
                                      axis=-1,
                                      keepdims=True)
                multipliernew2 = multiplier2 - lamda * (measurement - phi_I)
                auxli_v2 = lamda * (
                    tf.multiply(mask,
                                (phi_I + multipliernew2 / lamda)) - phi_T_E)

                auxli_v2 = auxli_v2 + auxli_v1
                #auxli_v2 = slim.fully_connected(auxli_v2,self.ratio,activation_fn=None,scope='LiPro_%d_End'%(1))
                auxli_v = rho * auxli_v2

                result1 = linearer - auxli_v
                for ind_pattern in range(self.num_pattern):
                    result2.append(result1 - multipliernew1[ind_pattern] /
                                   gamma[ind_pattern])
        return result1, result2, multipliernew1, multipliernew2
Code Example #9
def primary_routing(inputs, is_train):
    batch_size = int(inputs.get_shape()[0])
    data_size = int(inputs.get_shape()[1])
    num_capsules = int(inputs.get_shape()[3])
    dim = int(inputs.get_shape()[-1])

    b = tf.fill([batch_size, data_size, data_size, num_capsules, 1], 0.0)
    biases = slim.variable('baises',
                           shape=[cfg.num_capsule_primary, dim],
                           initializer=tf.constant_initializer(0.1),
                           dtype=tf.float32,
                           trainable=is_train)
    test_vals = tf.global_variables()

    for i in range(cfg.primary_routing_num):
        if cfg.leaky:
            c = leaky_softmax(b, num_capsules, dim=3)
        else:
            c = tf.nn.softmax(b, dim=3)

        if i == cfg.primary_routing_num - 1:
            s = inputs * c
            v = squash(s + biases)
        else:
            s = inputs * c
            v = squash(s + biases)
            b += tf.reduce_sum(v * inputs, axis=-1, keepdims=True)

    return v
Code Example #10
def mat_transform(input, output_cap_size, size):
    """Compute the vote.

    :param input: pose tensor, shape (size, 288, 16)
    :param output_cap_size: number of output capsules, e.g. 32
    :param size: batch size

    :return votes: shape (size, 288, output_cap_size, 16)
    """

    caps_num_i = int(input.get_shape()[1])  # 288
    output = tf.reshape(input, shape=[size, caps_num_i, 1, 4,
                                      4])  # (size, 288, 1, 4, 4)

    w = slim.variable('w',
                      shape=[1, caps_num_i, output_cap_size, 4, 4],
                      dtype=tf.float32,
                      initializer=tf.truncated_normal_initializer(
                          mean=0.0, stddev=1.0))  # (1, 288, 32, 4, 4)

    w = tf.tile(w, [size, 1, 1, 1, 1])  # (24, 288, 32, 4, 4)

    output = tf.tile(output,
                     [1, 1, output_cap_size, 1, 1])  # (size, 288, 32, 4, 4)

    votes = tf.matmul(output, w)  # (24, 288, 32, 4, 4)
    votes = tf.reshape(
        votes, [size, caps_num_i, output_cap_size, 16])  # (size, 288, 32, 16)

    return votes
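A minimal usage sketch (not from the source; it assumes size is the batch size and the input holds 288 flattened 4x4 pose matrices per example, matching the docstring):

poses = tf.placeholder(tf.float32, [24, 288, 16])
with tf.variable_scope('conv_caps1'):
    votes = mat_transform(poses, output_cap_size=32, size=24)
# votes: (24, 288, 32, 16) -- one 16-d vote from each of the 288 input capsules
# to each of the 32 output capsules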
Code Example #11
def mat_transform(input, caps_num_c, regularizer, tag=False):
    # extracts the transformation matrix parameters as a TensorFlow trainable variable w
    # It then multiplies it with the "tiled" input pose matrices to generate the votes for parent capsules
    # input: [1250, 72, 16]
    print('*** Inside mat_transform input shape', input)
    batch_size = int(input.get_shape()[0])
    caps_num_i = int(input.get_shape()[1])
    output = tf.reshape(input, shape=[batch_size, caps_num_i, 1, 4, 4])
    # the output of capsule is miu, the mean of a Gaussian, and activation, the sum of probabilities
    # it has no relationship with the absolute values of w and votes
    # using weights with bigger stddev helps numerical stability
    # Note: w here is the transformation matrix; each capsule layer uses a different variable name, so the weights stay distinct
    w = slim.variable('w', shape=[1, caps_num_i, caps_num_c, 4, 4], dtype=tf.float32,
                      initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1.0),
                      regularizer=regularizer)
    
    # create a new tensor by replicating the input multiple times
    # output dimension i has input.dims(i) * multiples[i] elements
    # the first dimension is repeated batch_size times; no other values are reassigned

    # during backprop only the single underlying w is updated; the tile here just makes the later computation convenient
    
    w = tf.tile(w, [batch_size, 1, 1, 1, 1])
    output = tf.tile(output, [1, 1, caps_num_c, 1, 1])
    print('*** Inside mat_transform w shape', w)          # (1250, 72, 16, 4, 4)
    print('*** Inside mat_transform output shape', output) # (1250, 72, 16, 4, 4)
    print('*** Inside mat_transform output tf.matmul(output, w)', tf.matmul(output, w)) # (1250, 72, 16, 4, 4)
    # multiplying (1250, 72, 16, 4, 4) with (1250, 72, 16, 4, 4) performs matrix multiplication over the trailing (4, 4) dimensions
    votes = tf.reshape(tf.matmul(output, w), [batch_size, caps_num_i, caps_num_c, 16])
    return votes
Code Example #12
def mat_transform(input, caps_num_c, regularizer):
    batch_size = int(input.get_shape()[0])
    caps_num_i = int(input.get_shape()[1])
    if cfg.is_mat:  # 4x4 pose matrix
        output = tf.reshape(input, shape=[batch_size, caps_num_i, 1, 4, 4])
        shape_w = [1, caps_num_i, caps_num_c, 4, 4]
    else:  # length 16 pose vector
        output = tf.reshape(input, shape=[batch_size, caps_num_i, 1, 16])
        shape_w = [1, caps_num_i, caps_num_c, 16]
    # the output of capsule is miu, the mean of a Gaussian, and activation, the sum of probabilities
    # it has no relationship with the absolute values of w and votes
    # using weights with bigger stddev helps numerical stability
    w = slim.variable('w',
                      shape=shape_w,
                      dtype=tf.float32,
                      initializer=tf.truncated_normal_initializer(mean=0.0,
                                                                  stddev=1.0),
                      regularizer=regularizer)

    if cfg.is_mat:
        w = tf.tile(w, [batch_size, 1, 1, 1, 1])
        output = tf.tile(output, [1, 1, caps_num_c, 1, 1])
        votes = tf.reshape(tf.matmul(output, w),
                           [batch_size, caps_num_i, caps_num_c, 16])
    else:
        w = tf.tile(w, [batch_size, 1, 1, 1])
        output = tf.tile(output, [1, 1, caps_num_c, 1])
        votes = tf.reshape(output * w,
                           [batch_size, caps_num_i, caps_num_c, 16])

    return votes
Code Example #13
 def _conv3d(self,
             inputs,
             num_filters,
             filter_size=(3, 3, 3),
             stride=1,
             padding='SAME'):
     """
     3D convolutional layer
     :param inputs:
     :param num_filters:
     :param filter_size:
     :param stride:
     :param padding:
     :return:
     """
     assert len(filter_size) == 3
     stride_vec = _stride_arr(stride)
     # note: `reduce` requires `from functools import reduce` on Python 3
     filter_shape = list(filter_size) + [
         inputs.get_shape()[-1].value, num_filters
     ]
     n = reduce(lambda x, y: x * y, filter_size) * num_filters
     filt = slim.variable(
         'DW',
         filter_shape,
         tf.float32,
         initializer=tf.random_normal_initializer(stddev=np.sqrt(2.0 / n)),
         regularizer=slim.l2_regularizer(self.WEIGHT_DECAY))
     net = tf.nn.conv3d(inputs,
                        filter=filt,
                        strides=stride_vec,
                        padding=padding)
     net = tf.nn.relu(net)
     return net
Code Example #14
    def test_slim(self):
        weight = slim.variable(
            name='weights',
            shape=[10 * 10 * 2 * 3],
            initializer=tf.truncated_normal_initializer(stddev=0.1),
            regularizer=slim.l2_regularizer(0.05))

        return
Code Example #15
def hybrid_cnn(cnn_in, scope):
    # with slim.arg_scope([slim.conv2d, slim.fully_connected],
    #                     normalizer_fn=slim.batch_norm,
    #                     weights_regularizer=slim.l2_regularizer(0.0005)):

    with tf.name_scope(scope):
        conv11 = slim.conv2d(cnn_in, num_outputs=128, kernel_size=1)
        conv12 = slim.conv2d(conv11,
                             num_outputs=96,
                             kernel_size=3,
                             padding='SAME')

        conv21 = slim.conv2d(cnn_in, num_outputs=64, kernel_size=1)
        conv22 = slim.conv2d(conv21,
                             num_outputs=32,
                             kernel_size=5,
                             padding='SAME')

        dilated_filter1 = slim.variable(
            name=scope + '_filter1',
            shape=[3, 3, 128, 64],
            initializer=tf.truncated_normal_initializer(stddev=0.01))
        dilated_filter2 = slim.variable(
            name=scope + '_filter2',
            shape=[3, 3, 64, 32],
            initializer=tf.truncated_normal_initializer(stddev=0.01))

        conv31 = tf.nn.atrous_conv2d(cnn_in,
                                     dilated_filter1,
                                     rate=2,
                                     padding='SAME')
        conv32 = tf.nn.atrous_conv2d(conv31,
                                     dilated_filter2,
                                     rate=3,
                                     padding='SAME')

        conv41 = slim.conv2d(cnn_in, num_outputs=32, kernel_size=1)
        conv42 = slim.max_pool2d(conv41,
                                 kernel_size=2,
                                 stride=1,
                                 padding='SAME')

        concat = tf.concat([conv12, conv22, conv32, conv42], axis=3)

    return concat
Code Example #16
 def __init__(self, loss_list):
     self._loss_list = loss_list
     self._sigmas_sq = []
     for i in range(len(self._loss_list)):
         self._sigmas_sq.append(
             slim.variable('Sigma_sq_' + str(i),
                           dtype=tf.float32,
                           shape=[],
                           initializer=tf.initializers.random_uniform(
                               minval=0.2, maxval=1)))
Code Example #17
def normolVariable():
    weight1 = slim.variable(
        name="weight1",
        shape=[2, 3],
        initializer=tf.truncated_normal_initializer(stddev=0.1),
        regularizer=slim.l2_regularizer(scale=0.05))
    weight2 = slim.variable(
        name="weight2",
        shape=[2, 3],
        initializer=tf.truncated_normal_initializer(stddev=0.1),
        regularizer=slim.l2_regularizer(scale=0.05))
    variable = slim.get_variables()
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        print(sess.run(weight1))
        print("-----------------")
        print(sess.run(weight2))
        print("----------------")
        print(sess.run(variable))
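A follow-up sketch (an assumption, not part of the original snippet): once normolVariable() has built the graph, the l2_regularizer attached to weight1 and weight2 has registered one loss tensor per variable, which can be collected and summed for training:

normolVariable()
reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
total_reg_loss = tf.add_n(reg_losses)  # 0.05 * (tf.nn.l2_loss(weight1) + tf.nn.l2_loss(weight2))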
Code Example #18
File: tch_model.py Project: xiaojiew1/KDGAN
  def __init__(self, flags, is_training=True):
    self.is_training = is_training

    self.text_ph = tf.placeholder(tf.int64, shape=(None, None))
    self.label_ph = tf.placeholder(tf.float32, shape=(None, config.num_label))

    tch_scope = 'teacher'
    # initializer = tf.random_uniform([config.vocab_size, flags.embedding_size], -0.1, 0.1)
    with tf.variable_scope(tch_scope) as scope:
      with slim.arg_scope([slim.fully_connected],
          weights_regularizer=slim.l2_regularizer(flags.tch_weight_decay)):
        word_embedding = slim.variable('word_embedding',
            shape=[config.vocab_size, flags.embedding_size],
            # regularizer=slim.l2_regularizer(flags.tch_weight_decay),
            initializer=tf.random_uniform_initializer(-0.1, 0.1))
        # word_embedding = tf.get_variable('word_embedding', initializer=initializer)
        text_embedding = tf.nn.embedding_lookup(word_embedding, self.text_ph)
        text_embedding = tf.reduce_mean(text_embedding, axis=-2)
        self.logits = slim.fully_connected(text_embedding, config.num_label,
                  activation_fn=None)

    self.labels = tf.nn.softmax(self.logits)

    if not is_training:
      return

    save_dict = {}
    for variable in tf.trainable_variables():
      if not variable.name.startswith(tch_scope):
        continue
      print('%s added to TCH saver' % variable.name)
      save_dict[variable.name] = variable
    self.saver = tf.train.Saver(save_dict)

    global_step = tf.train.get_global_step()
    decay_steps = int(config.train_data_size / config.train_batch_size * flags.num_epochs_per_decay)
    learning_rate = tf.train.exponential_decay(flags.init_learning_rate,
        global_step, decay_steps, flags.learning_rate_decay_factor,
        staircase=True, name='exponential_decay_learning_rate')
    
    loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        labels=self.label_ph, logits=self.logits))
    losses = [loss]
    regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
    losses.extend(regularization_losses)
    total_loss = tf.add_n(losses, name='total_loss')

    optimizer = tf.train.AdamOptimizer(learning_rate)
    self.train_op = optimizer.minimize(total_loss, global_step=global_step)

    tf.summary.scalar('total_loss', total_loss)
    self.summary_op = tf.summary.merge_all()
Code Example #19
def squash_with_bias(inputs, is_train):
    num_caps = int(inputs.get_shape()[1])
    regularizer = tf.contrib.layers.l2_regularizer(scale=cfg.weight_decay)
    biases = slim.variable('baises',
                           shape=[num_caps, 16],
                           regularizer=regularizer,
                           initializer=tf.constant_initializer(0.01),
                           dtype=tf.float32,
                           trainable=is_train)

    outputs = squash(inputs + biases)

    return outputs
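The squash helper is not shown in this listing; a common definition (the squashing nonlinearity from the dynamic-routing capsule papers), included here as an assumption about what the call above expects:

def squash(s, axis=-1, epsilon=1e-9):
    # scale each capsule vector s to length ||s||^2 / (1 + ||s||^2)
    squared_norm = tf.reduce_sum(tf.square(s), axis=axis, keepdims=True)
    scale = squared_norm / (1.0 + squared_norm) / tf.sqrt(squared_norm + epsilon)
    return scale * s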
Code Example #20
File: inference.py Project: zsommer/yolo-tf
 def batch_norm(net):
     net = slim.batch_norm(net,
                           center=center,
                           scale=True,
                           epsilon=1e-5,
                           is_training=training)
     if not center:
         net = tf.nn.bias_add(
             net,
             slim.variable('biases',
                           shape=[tf.shape(net)[-1]],
                           initializer=tf.zeros_initializer()))
     return net
Code Example #21
def transfer_style(content_image_filename, style_image_filename,
                   result_image_filename, content_loss_weight,
                   style_loss_weight, total_variation_loss_weight,
                   max_iterations):
    content_image = read_content_image(content_image_filename)
    style_image = read_style_image(style_image_filename,
                                   content_image.shape[:2])

    image = slim.variable('input',
                          initializer=tf.constant(np.expand_dims(
                              content_image, 0),
                                                  dtype=tf.float32),
                          trainable=True)
    content_layer, style_layers = vgg_tools.get_layers(image,
                                                       reuse_variables=False)

    content_layer_target = vgg_tools.get_content_layer_values(
        content_image, True)
    content_loss = content_loss_weight * losses.get_content_loss(
        content_layer, content_layer_target)

    style_layers_targets = vgg_tools.get_style_layers_values(style_image, True)
    style_loss = style_loss_weight * losses.get_style_loss(
        style_layers, style_layers_targets)

    total_variation_loss = total_variation_loss_weight * losses.get_total_variation_loss(
        image)

    total_loss = content_loss + style_loss + total_variation_loss
    train_operation = tf.train.AdamOptimizer(
        learning_rate=1e0).minimize(total_loss)

    saver = tf.train.Saver(tf.get_collection('model_variables'))
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        saver.restore(sess, VGG_19_CHECKPOINT_FILENAME)

        for i in range(max_iterations):
            content_loss_value, style_loss_value, total_variation_loss_value, total_loss_value, _ = sess.run(
                [
                    content_loss, style_loss, total_variation_loss, total_loss,
                    train_operation
                ])
            if i % 50 == 0:
                print(
                    f'Iteration: {i}, Content loss: {content_loss_value:.4}. Style loss: {style_loss_value:.4}. '
                    f'Total variation loss: {total_variation_loss_value:.4}. Total loss: {total_loss_value:.4}'
                )

        result = sess.run(image)
        write_result_image(result, result_image_filename)
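A minimal invocation sketch (the filenames and loss weights below are illustrative assumptions, not values from the source project):

transfer_style(content_image_filename='content.jpg',
               style_image_filename='style.jpg',
               result_image_filename='result.jpg',
               content_loss_weight=1.0,
               style_loss_weight=1e3,
               total_variation_loss_weight=1e-2,
               max_iterations=500)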
Code Example #22
 def LinearProj_orig(self, phi_cross, phi_T_y):
     '''
     Input Argument:
         [batch, height, width, depth] phi_T_y: One step reconstruction initialization
         [height, width, depth, depth] phi_cross: phi_T_Phi the inner product of each tube
     Return:
         [batch, height, width, depth] Reconstruction Result
     '''
     gamma, rho = [], []
     with tf.variable_scope('LinearProj_init', None, reuse=None):
         with slim.arg_scope(
             [slim.variable],
                 dtype=tf.float32,
                 initializer=slim.initializers.xavier_initializer(),
                 regularizer=slim.l2_regularizer(0.0),
                 trainable=self.is_training):
             for ind_pattern in range(self.num_pattern):
                 rho.append(
                     slim.variable(name='rho_%d' % (ind_pattern), shape=[]))
                 gamma.append(
                     slim.variable(name='gamma_%d' % (ind_pattern),
                                   shape=[1, 1, self.ratio, self.ratio]))
             return self.Sparse_Inverse(phi_cross, rho, gamma, phi_T_y)
Code Example #23
def vec_transform(input, caps_num_out, channel_num_out):
    batch_size = int(input.get_shape()[0])
    caps_num_in = int(input.get_shape()[1])
    channel_num_in = int(input.get_shape()[-1])

    w = slim.variable('w', shape=[1, caps_num_out, caps_num_in, channel_num_in, channel_num_out], dtype=tf.float32,
                      initializer=tf.random_normal_initializer(mean=0.0, stddev=0.01)) #

    w = tf.tile(w, [batch_size, 1, 1, 1, 1])
    output = tf.reshape(input, shape=[batch_size, 1, caps_num_in, 1, channel_num_in])
    output = tf.tile(output, [1, caps_num_out, 1, 1, 1])

    output = tf.reshape(tf.matmul(output, w), [batch_size, caps_num_out, caps_num_in, channel_num_out])

    return output
Code Example #24
def scale_and_center(tensor_in,
                     scale=True,
                     center=True,
                     reuse=None,
                     variables_collections=None,
                     scope=None):
    """Applies the trainable batch-norm correction to a normalized tensor:
        u = gamma * (u_pre + beta)
    """
    with tf.variable_scope(scope, 'scale_and_offset', [tensor_in],
                           reuse=reuse) as sc:
        tensor_in = tf.convert_to_tensor(tensor_in)
        input_shape = tensor_utils.get_shape(tensor_in)

        outputs = tensor_in
        if center:
            beta_collections = layers.utils.get_variable_collections(
                variables_collections, "beta")
            beta = slim.variable("beta",
                                 shape=[input_shape[-1]],
                                 initializer=tf.zeros_initializer,
                                 collections=beta_collections,
                                 trainable=True)
            outputs = outputs + beta

        if scale:
            gamma_collections = layers.utils.get_variable_collections(
                variables_collections, "gamma")
            gamma = slim.variable("gamma",
                                  shape=[input_shape[-1]],
                                  initializer=tf.constant_initializer(1.),
                                  collections=gamma_collections,
                                  trainable=True)
            outputs = gamma * outputs

        return outputs
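A minimal composition sketch (an assumption that both helpers above live in the same module with its layers/tensor_utils imports): chaining the two halves reproduces a standard batch norm, u = gamma * ((x - mean) / sqrt(variance + epsilon) + beta):

net = tf.placeholder(tf.float32, [None, 28, 28, 64])
net = batch_normalization(net, scope='bn1')
net = scale_and_center(net, scale=True, center=True, scope='bn1_correction')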
Code Example #25
File: capsnet_em.py Project: lzqkean/deep_learning
def mat_transform(input, caps_num_c, regularizer, tag=False):
    batch_size = int(input.get_shape()[0])
    caps_num_i = int(input.get_shape()[1])
    output = tf.reshape(input, shape=[batch_size, caps_num_i, 1, 4, 4])
    # the output of capsule is miu, the mean of a Gaussian, and activation, the sum of probabilities
    # it has no relationship with the absolute values of w and votes
    # using weights with bigger stddev helps numerical stability
    w = slim.variable('w', shape=[1, caps_num_i, caps_num_c, 4, 4], dtype=tf.float32,
                      initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1.0),
                      regularizer=regularizer)

    w = tf.tile(w, [batch_size, 1, 1, 1, 1])
    output = tf.tile(output, [1, 1, caps_num_c, 1, 1])
    votes = tf.reshape(tf.matmul(output, w), [batch_size, caps_num_i, caps_num_c, 16])

    return votes
Code Example #26
def vec_transform(input, caps_num_out, channel_num_out, regularizer):
    batch_size = int(input.get_shape()[0])
    caps_num_in = int(input.get_shape()[1])
    channel_num_in = int(input.get_shape()[-1])

    w = slim.variable('w', shape=[1, caps_num_out, caps_num_in, channel_num_in, channel_num_out], dtype=tf.float32,
                      initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
                      regularizer=regularizer)

    w = tf.tile(w, [batch_size, 1, 1, 1, 1])
    output = tf.reshape(input, shape=[batch_size, 1, caps_num_in, 1, channel_num_in])
    output = tf.tile(output, [1, caps_num_out, 1, 1, 1])

    output = tf.reshape(tf.matmul(output, w), [batch_size, caps_num_out, caps_num_in, channel_num_out])

    return output
Code Example #27
def mat_transform(input, caps_num_c, regularizer, tag=False):
    batch_size = int(input.get_shape()[0])
    caps_num_i = int(input.get_shape()[1])  #3 * 3 * B
    output = tf.reshape(input, shape=[batch_size, caps_num_i, 1, 4, 4])

    w = slim.variable('w',
                      shape=[1, caps_num_i, caps_num_c, 4, 4],
                      dtype=tf.float32,
                      initializer=tf.truncated_normal_initializer(mean=0.0,
                                                                  stddev=1.0),
                      regularizer=regularizer)

    w = tf.tile(w, [batch_size, 1, 1, 1, 1])  # use tile instead of a multiplication loop for efficiency
    output = tf.tile(output, [1, 1, caps_num_c, 1, 1])
    #votes = tf.reshape(tf.matmul(output, w), [batch_size, caps_num_i, caps_num_c, 16])
    votes = tf.reshape(output @ w, [batch_size, caps_num_i, caps_num_c, 16])

    return votes
Code Example #28
def dilated_conv2D_layer(inputs, num_outputs, kernel_size, rate, padding,
                         scope, use_bias, weights_regularizer):
    with tf.variable_scope(name_or_scope=scope):
        in_channels = inputs.get_shape().as_list()[3]
        kernel = [kernel_size, kernel_size, in_channels, num_outputs]
        filter_weight = slim.variable(
            name='weights',
            shape=kernel,
            initializer=tf.truncated_normal_initializer(stddev=0.1),
            regularizer=weights_regularizer)

        inputs = tf.nn.atrous_conv2d(inputs,
                                     filter_weight,
                                     rate=rate,
                                     padding=padding)  # + bias
        if use_bias:
            bias = tf.Variable(tf.constant(0.01, shape=[num_outputs]))
            inputs = inputs + bias
        return inputs
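A minimal usage sketch (hypothetical tensor names; the regularizer mirrors the slim.l2_regularizer usage seen elsewhere in these examples):

images = tf.placeholder(tf.float32, [None, 64, 64, 3])
features = dilated_conv2D_layer(images,
                                num_outputs=32,
                                kernel_size=3,
                                rate=2,
                                padding='SAME',
                                scope='dilated1',
                                use_bias=True,
                                weights_regularizer=slim.l2_regularizer(0.0005))
# SAME padding keeps the 64x64 spatial size; rate=2 widens the 3x3 kernel's
# receptive field to 5x5 without adding parameters.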
Code Example #29
File: network.py Project: zxgineng/deepcv
        def l2_normalization(inputs, scaling=True):
            """
            compute the l2_norm over the channel axis
            :param inputs: 4D tensor, shape-[N,H,W,C]
            :param scaling: bool
            :return outputs: 4D tensor, shape-[N,H,W,C]
            """
            with tf.variable_scope('L2Normalization'):
                inputs_shape = inputs.get_shape()
                channel_shape = inputs_shape[-1:]
                # compute the l2_norm over the channel axis
                outputs = tf.nn.l2_normalize(inputs, 3, epsilon=1e-12)
                # scaling
                if scaling:
                    # scale.shape == channel.shape
                    scale = slim.variable('gamma', channel_shape, tf.float32, tf.constant_initializer(1.0))
                    outputs = tf.multiply(outputs, scale)

                return outputs
Code Example #30
def mat_transform(pose, kernel, caps_num_i, caps_num_c):
    
    output = tf.reshape(pose, shape=[cfg.batch_size, -1, kernel * kernel, caps_num_i, 1, 4, 4])
    b = int(output.get_shape()[0])
    wh = int(output.get_shape()[1])

    w = slim.variable('w' + str(caps_num_c), shape=[1, 1, kernel * kernel, caps_num_i, caps_num_c, 4, 4], dtype=tf.float32)
    print('    mat_transform input0', pose)
    print('    mat_transform input1', output)
    print('    mat_transform w', w)
    w = tf.tile(w, [b, wh, 1, 1, 1, 1, 1])
    output = tf.tile(output, [1, 1, 1, 1, caps_num_c, 1, 1])
    print('    mat_transform tile a', output)
    print('    mat_transform tile b', w)
    votes = tf.matmul(output, w)
    print('    mat_transform tile a*b', votes)
    votes = tf.reshape(votes, [b, wh, kernel * kernel, caps_num_i, caps_num_c, 16])
    print('    mat_transform votes', votes)
    return votes
Code Example #31
def mat_transform(input, caps_num_c, regularizer, tag=False):
    """
    Function for calculating the votes of a capsule layer, V_i_j = M_i * W_i_j
    """
    batch_size = int(input.get_shape()[0])
    caps_num_i = int(input.get_shape()[1])
    # output is M: the 4x4 pose matrix associated with every capsule. This depends on the current input and is not
    # stored.
    output = tf.reshape(input, shape=[batch_size, caps_num_i, 1, 4, 4])
    # w is W: the 4x4 weight matrix and is learned discriminatively
    w = slim.variable('w', shape=[1, caps_num_i, caps_num_c, 4, 4], dtype=tf.float32,
                      initializer=tf.truncated_normal_initializer(mean=0.0, stddev=1.0),
                      regularizer=regularizer)

    w = tf.tile(w, [batch_size, 1, 1, 1, 1])
    output = tf.tile(output, [1, 1, caps_num_c, 1, 1])
    # A vote is given by the pose matrix transformed by the weight matrix.
    votes = tf.reshape(tf.matmul(output, w), [batch_size, caps_num_i, caps_num_c, 16])

    return votes
Code Example #32
def mat_transform(input, caps_num_out, is_train):
    batch_size = int(input.get_shape()[0])
    caps_num_in = int(input.get_shape()[1])

    std = math.sqrt(2. / caps_num_out)

    regularizer = tf.contrib.layers.l2_regularizer(scale=cfg.weight_decay)
    w = slim.variable('w',
                      shape=[1, caps_num_out, caps_num_in, 4, 4],
                      regularizer=regularizer,
                      initializer=tf.random_normal_initializer(mean=0.0,
                                                               stddev=std),
                      trainable=is_train)

    output = tf.reshape(input, [batch_size, 1, caps_num_in, 4, 4])
    output = tf.tile(output, [1, caps_num_out, 1, 1, 1])

    output = tf.reshape(faster_matmul(output, w),
                        [batch_size, caps_num_out, caps_num_in, -1])

    return output
Code Example #33
File: capsnet_em.py Project: lzqkean/deep_learning
def em_routing(votes, activation, caps_num_c, regularizer, tag=False):
    test = []

    batch_size = int(votes.get_shape()[0])       # 1250
    caps_num_i = int(activation.get_shape()[1])  # 72
    n_channels = int(votes.get_shape()[-1])      # 16

    # m-step
    r = tf.constant(np.ones([batch_size, caps_num_i, caps_num_c], dtype=np.float32) / caps_num_c) # (1250, 72, 16)
    r = r * activation

    # tf.reshape(tf.reduce_sum(r, axis=1), shape=[batch_size, 1, caps_num_c])
    r_sum = tf.reduce_sum(r, axis=1, keep_dims=True)   # (1250, 1, 16)
    r1 = tf.reshape(r / (r_sum + cfg.epsilon),
                    shape=[batch_size, caps_num_i, caps_num_c, 1]) # (1250, 72, 16, 1)

    miu = tf.reduce_sum(votes * r1, axis=1, keep_dims=True)     # (1250, 1, 16, 16)
    sigma_square = tf.reduce_sum(tf.square(votes - miu) * r1,
                                 axis=1, keep_dims=True) + cfg.epsilon # (1250, 1, 16, 16)

    beta_v = slim.variable('beta_v', shape=[caps_num_c, n_channels], dtype=tf.float32,
                           initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
                           regularizer=regularizer)                    # (16, 16)
    r_sum = tf.reshape(r_sum, [batch_size, caps_num_c, 1])             # (1250, 16, 1)
    cost_h = (beta_v + tf.log(tf.sqrt(tf.reshape(sigma_square,
                                                 shape=[batch_size, caps_num_c, n_channels])))) * r_sum  # (1250, 16, 16)

    beta_a = slim.variable('beta_a', shape=[caps_num_c], dtype=tf.float32,
                           initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.01),
                           regularizer=regularizer)                                                      # (16, )
    activation1 = tf.nn.sigmoid(cfg.ac_lambda0 * (beta_a - tf.reduce_sum(cost_h, axis=2)))               # (1250, 16)

    test.append(miu)

    for iters in range(cfg.iter_routing):

        # e-step

        # Contributor: Yunzhi Shi
        # log and exp here provide higher numerical stability especially for bigger number of iterations
        log_p_c_h = -tf.log(tf.sqrt(sigma_square)) - \
            (tf.square(votes - miu) / (2 * sigma_square))        # (1250, 72, 16, 16)
        log_p_c_h = log_p_c_h - \
            (tf.reduce_max(log_p_c_h, axis=[2, 3], keep_dims=True) - tf.log(10.0)) # (1250, 72, 16, 16)
        p_c = tf.exp(tf.reduce_sum(log_p_c_h, axis=3))

        a1 = tf.reshape(activation1, shape=[batch_size, 1, caps_num_c])  # (1250, 1, 16)
        ap = p_c * a1   # (1250, 72, 16)

        sum_ap = tf.reduce_sum(ap, axis=2, keep_dims=True)     # (1250, 72, 1)
        r = ap / (sum_ap + cfg.epsilon)                        # (1250, 72, 16)

        # m-step
        r = r * activation

        r_sum = tf.reduce_sum(r, axis=1,
                              keep_dims=True)  # tf.reshape(tf.reduce_sum(r, axis=1), shape=[batch_size, 1, caps_num_c])
        r1 = tf.reshape(r / (r_sum + cfg.epsilon),
                        shape=[batch_size, caps_num_i, caps_num_c, 1])

        miu = tf.reduce_sum(votes * r1, axis=1, keep_dims=True)
        sigma_square = tf.reduce_sum(tf.square(votes - miu) * r1,
                                     axis=1, keep_dims=True) + cfg.epsilon

        r_sum = tf.reshape(r_sum, [batch_size, caps_num_c, 1])
        cost_h = (beta_v + tf.log(tf.sqrt(tf.reshape(sigma_square,
                                                     shape=[batch_size, caps_num_c, n_channels])))) * r_sum

        activation1 = tf.nn.sigmoid(
            (cfg.ac_lambda0 + (iters + 1) * cfg.ac_lambda_step) * (beta_a - tf.reduce_sum(cost_h, axis=2)))

        test.append(miu)

    return miu, activation1, test     # (1250, 1, 16, 16), (1250, 16),
Code Example #34
File: inference.py Project: happog/yolo-tf
 def batch_norm(net):
     net = slim.batch_norm(net, center=center, scale=True, epsilon=1e-5, is_training=training)
     if not center:
         net = tf.nn.bias_add(net, slim.variable('biases', shape=[tf.shape(net)[-1]], initializer=tf.zeros_initializer()))
     return net
Code Example #35
File: variables_test.py Project: LevinJ/CodeSamples
import tensorflow as tf
import tensorflow.contrib.slim as slim
# Build a graph.


weights = slim.variable('weights',
                             shape=[10, 10, 3 , 3],
                             initializer=tf.truncated_normal_initializer(stddev=0.1),
                             regularizer=slim.l2_regularizer(0.05),
                             device='/CPU:0')

weights_2 = slim.model_variable('weights_2',
                              shape=[10, 10, 3 , 3],
                              initializer=tf.truncated_normal_initializer(stddev=0.1),
                              regularizer=slim.l2_regularizer(0.05),
                              device='/CPU:0')

my_var = slim.variable('my_var',
                       shape=[20, 1],
                       initializer=tf.zeros_initializer())
regular_variables_and_model_variables = slim.get_variables()

variables_to_restore = slim.get_variables_to_restore(exclude=["v1"])
# Launch the graph in a session.
sess = tf.Session()

# Evaluate the variables.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    print(my_var.eval())
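A short follow-up sketch (based on documented tf.contrib.slim behaviour rather than the snippet itself): slim.model_variable additionally registers the variable in the tf.GraphKeys.MODEL_VARIABLES collection, so it shows up in slim.get_model_variables(), while a plain slim.variable does not:

model_vars = slim.get_model_variables()   # contains weights_2 only
all_vars = slim.get_variables()           # weights, weights_2 and my_var
print([v.op.name for v in model_vars])
print([v.op.name for v in all_vars])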