Example #1
def embed_stimulus(embedx, embedy, stimx, stimy, stim_history=30, is_training=True):
  """Embed stimulus"""

  # Embed stimulus
  stim_tf = tf.placeholder(tf.float32, shape=[None, stimx, stimy, stim_history])
  net = stim_tf
  n_repeats = 3
  output_size = 5
  conv_sz = 5
  with tf.name_scope('stim_model'):
    # pass the stimulus through a few layers of convolutions
    with slim.arg_scope([slim.conv2d],
                        activation_fn=tf.nn.relu,
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                        weights_regularizer=slim.l2_regularizer(0.005),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params={'is_training': is_training}):
      net = slim.repeat(net, n_repeats, slim.conv2d, output_size,
                        [conv_sz, conv_sz], scope='conv_stim')
      net = slim.conv2d(net, 1, [conv_sz, conv_sz], scope='conv_stim_final')

      # Rotate stimulus
      rotation_global_stim = tf.Variable(0., name='rotate_stim')  # probably better to rotate the stimulus.
      # Letting TF-Slim know about the additional variable.
      slim.add_model_variable(rotation_global_stim)
      net = tf.contrib.image.rotate(net, rotation_global_stim)

      # Scale stimulus
      net = tf.image.resize_images(net, [embedx, embedy])

      stimulus_embedding = tf.reduce_sum(net, 3)

    return stimulus_embedding, stim_tf
Example #2
import tensorflow as tf
import tensorflow.contrib.slim as slim

def variableUse():
    weight = tf.Variable(tf.ones([2, 3]))
    slim.add_model_variable(weight)
    model_variables = slim.get_model_variables()

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(model_variables))
Example #3
def add_noise_plane(net, noise_channels, training=True):
    noise_shape = net.get_shape().as_list()
    noise_shape[-1] = noise_channels
    noise_planes = tf.random_normal(shape=noise_shape)
    biases = tf.Variable(tf.constant(0.0,
                                     shape=[noise_channels],
                                     dtype=tf.float32),
                         trainable=True,
                         name='noise_mu')
    if training:
        slim.add_model_variable(biases)
    noise_planes = tf.nn.bias_add(noise_planes, biases)
    return tf.concat([net, noise_planes], axis=3,
                     name='add_noise_{}'.format(noise_channels))
Example #4
def embed_ei(embedx, embedy, eix, eiy, n_elec, ei_embedding_matrix, is_training=True):
  """Embed EIs."""

  # EI -> receptive fields
  ei_tf = tf.placeholder(tf.float32, shape=[n_elec, None])  # n_elec x n_cells
  ei_embed_tf = tf.constant(ei_embedding_matrix.astype(np.float32), name='ei_embedding')  # eix x eiy x n_elec
  ei_embed_2d_tf = tf.reshape(ei_embed_tf, [eix * eiy, n_elec])
  ei_embed = tf.matmul(ei_embed_2d_tf, ei_tf)
  ei_embed_3d = tf.reshape(ei_embed, [eix, eiy, -1])

  # make an embedding using slim
  net = tf.expand_dims(tf.transpose(ei_embed_3d, [2, 0, 1]), 3)

  # Get RF map from EI
  n_repeats = 3
  output_size = 5
  conv_sz = 5
  with tf.name_scope('ei_model'):
    # pass EI through a few layers of convolutions
    with slim.arg_scope([slim.conv2d],
                        activation_fn=tf.nn.relu,
                        weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
                        weights_regularizer=slim.l2_regularizer(0.005),
                        normalizer_fn=slim.batch_norm,
                        normalizer_params={'is_training': is_training}
                        ):
      net = slim.repeat(net, n_repeats, slim.conv2d, output_size,
                        [conv_sz, conv_sz], scope='conv_ei')


    # Rotate the image.
    rotation_global_ei = tf.Variable(0., name='rotate_ei')  # probably better to rotate the stimulus.
    # Letting TF-Slim know about the additional variable.
    slim.add_model_variable(rotation_global_ei)
    net = tf.contrib.image.rotate(net, rotation_global_ei)

    # scale image
    net = tf.image.resize_images(net, [embedx, embedy])
    ei_embedding = tf.transpose(tf.reduce_sum(net, 3), [1, 2, 0]) # embedx x embedy x n_cells

  return ei_embedding, ei_tf
Example #5
'''
    https://www.cnblogs.com/bmsl/p/dongbin_bmsl_01.html
    http://blog.csdn.net/guvcolie/article/details/77686555
    https://www.2cto.com/kf/201706/649266.html

    Chapter 7 of "Learning TensorFlow: A Guide to Building Deep Learning Systems" (Tom Hope) is a very good treatment of slim.
'''
'''
Advantages of TF-Slim: as a lightweight TensorFlow library, slim makes building, training, and testing models much simpler.
'''
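# To illustrate that claim, here is a minimal sketch (not from the sources above) comparing
# a convolution written in raw TensorFlow with its one-line slim equivalent; the input
# tensor `x` and the layer sizes are made up for the example:
#
#   # raw TensorFlow: create and wire the parameters by hand
#   w = tf.get_variable('w', [3, 3, 64, 128],
#                       initializer=tf.truncated_normal_initializer(stddev=0.01))
#   b = tf.get_variable('b', [128], initializer=tf.zeros_initializer())
#   net = tf.nn.relu(tf.nn.bias_add(tf.nn.conv2d(x, w, [1, 1, 1, 1], 'SAME'), b))
#
#   # TF-Slim: one call creates the variables, applies the convolution and the ReLU
#   net = slim.conv2d(x, 128, [3, 3], scope='conv1')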
# 1. Usage:
import tensorflow as tf
import tensorflow.contrib.slim as slim
# 2. Components:
# arg_scope: lets the user rely on default argument values for the ops used inside the same arg_scope
# data, evaluation, layers, learning, losses, metrics, nets, queues, regularizers, variables
# 3. Defining models
# In slim, models can be defined concisely by combining variables, layers and scopes.
''' variables '''
# (1) variables: defined in variables.py. To create a weight variable, initialize it with a
# truncated normal, apply l2 regularization, and place it on the CPU, the code below is all that is needed:
weights = slim.variable(
    'weights',
    shape=[10, 10, 3, 3],
    initializer=tf.truncated_normal_initializer(stddev=0.1),
    regularizer=slim.l2_regularizer(0.05),
    device='/CPU:0')
# Native TensorFlow has two kinds of variables: regular variables and local variables.
# Most variables are regular variables: once created, they can be saved to disk using a saver;
# local variables only exist within a session and are not saved.
# slim distinguishes variables further by defining model variables: the variables of a model
# that are saved and restored for evaluation or inference (e.g. variables created by
# slim.model_variable or by slim layers).
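# A minimal sketch of this distinction (illustrative; the names below are made up):
# slim.model_variable puts its variable into the tf.GraphKeys.MODEL_VARIABLES collection,
# slim.variable does not, and hand-made variables can be registered with slim.add_model_variable.
w_model = slim.model_variable('w_model', shape=[3, 3],
                              initializer=tf.zeros_initializer())
w_plain = slim.variable('w_plain', shape=[3, 3],
                        initializer=tf.zeros_initializer())
w_custom = tf.Variable(tf.zeros([3, 3]), name='w_custom')
slim.add_model_variable(w_custom)  # register the hand-made variable as a model variable

print(slim.get_model_variables())  # [w_model, w_custom]
print(slim.get_variables())        # all of weights, w_model, w_plain, w_custom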
Example #6
    def build(self):
        config = self.__dict__.copy()
        num_labels      = self.num_labels    #for segmentation (pixel labels)
        ignore_label    = 255   #for segmentation (pixel labels)
        random_seed     = 1234
        generator       = self.resnetG
        discriminator   = self.resnetD
        GEN_A2B_NAME = 'GEN_A2B'
        GEN_B2A_NAME = 'GEN_B2A'
        DIS_A_NAME   = 'DIS_A'
        DIS_B_NAME   = 'DIS_B'

        global_step = tf.train.get_or_create_global_step()
        slim.add_model_variable(global_step)
        global_step_update = tf.assign_add(global_step, 1, name='global_step_update')

        def resize_and_onehot(tensor, shape, depth):
            with tf.device('/device:CPU:0'):
                onehot_tensor = tf.one_hot(tf.squeeze( 
                                        tf.image.resize_nearest_neighbor(
                                            tf.cast(tensor, tf.int32), shape), -1), depth=depth)
                return onehot_tensor
        def convert_to_labels(onehot_seg, crop_size=None):
            fake_segments_output = onehot_seg
            print ('%s | ' % fake_segments_output.device, fake_segments_output)
            if crop_size:
                fake_segments_output = tf.image.resize_bilinear(fake_segments_output, crop_size) #tf.shape(source_segments_batch)[1:3])
            fake_segments_output = tf.argmax(fake_segments_output, axis=-1) # generate segment indices matrix
            fake_segments_output = tf.expand_dims(fake_segments_output, axis=-1) # Create 4-d tensor.
            return fake_segments_output

        target_data_queue = []
        tf.set_random_seed(random_seed)
        coord = tf.train.Coordinator()
        with tf.name_scope("create_inputs"):
            for i, data in enumerate([config['source_data']] + config['target_data']):
                reader = ImageReader(
                    data['data_dir'],
                    data['data_list'],
                    config['crop_size'],                    # Original size: [1024, 2048]
                    random_scale=config['random_scale'],
                    random_mirror=True,
                    ignore_label=ignore_label,
                    img_mean=0,                             # set IMG_MEAN to centralize image pixels (set NONE for automatic choosing)
                    img_channel_format='RGB',               # Default: BGR in deeplab_v2. See here: https://github.com/zhengyang-wang/Deeplab-v2--ResNet-101--Tensorflow/issues/30
                    coord=coord,
                    rgb_label=False)
                data_queue = reader.dequeue(config['batch_size'])

                if i == 0:
                    # ---[ source: training data
                    source_images_batch    = data_queue[0]  #A: 3 channels
                    source_segments_batch  = data_queue[1]  #B: 1-channel labels

                    source_images_batch    = tf.cast(source_images_batch, tf.float32) / 127.5 - 1.

                    source_images_batch    = tf.image.resize_bilinear(source_images_batch, config['resize'])  #A: 3 channels
                    source_segments_batch  = tf.image.resize_nearest_neighbor(source_segments_batch, config['resize'])  #B: 1-channel labels

                    source_segments_batch  = tf.cast(tf.one_hot(tf.squeeze(source_segments_batch, -1), depth=num_labels), tf.float32) - 0.5 #B: 19 channels

                else:
                    # ---[ target: validation data / testing data
                    target_images_batch    = data_queue[0]  #A: 3 channels
                    target_segments_batch  = data_queue[1]  #B: 1-channel labels

                    target_images_batch    = tf.cast(target_images_batch, tf.float32) / 127.5 - 1.

                    target_images_batch    = tf.image.resize_bilinear(target_images_batch, config['resize'])  #A: 3 channels
                    target_segments_batch  = tf.image.resize_nearest_neighbor(target_segments_batch, config['resize'])  #B: 1-channel labels

                    target_segments_batch  = tf.cast(tf.one_hot(tf.squeeze(target_segments_batch, -1), depth=num_labels), tf.float32) - 0.5 #B: 19 channels
                    target_data_queue.append([target_images_batch, target_segments_batch])


        size_list = cuttool(config['batch_size'], config['gpus'])
        source_images_batches    = tf.split(source_images_batch,   size_list)
        source_segments_batches  = tf.split(source_segments_batch, size_list)
        fake_1_segments_output   = [None] * len(size_list)
        fake_2_segments_output   = [None] * len(size_list)
        fake_1_images_output     = [None] * len(size_list)
        fake_2_images_output     = [None] * len(size_list)
        d_real_img_output        = [None] * len(size_list)
        d_fake_img_output        = [None] * len(size_list)
        d_real_seg_output        = [None] * len(size_list)
        d_fake_seg_output        = [None] * len(size_list)

        for gid, (source_images_batch, source_segments_batch) in \
                enumerate(zip(source_images_batches, source_segments_batches)):
            # ---[ Generator A2B & B2A
            with tf.device('/device:GPU:{}'.format((gid-1) % config['gpus'])):
                fake_seg  = generator(source_images_batch, output_channel=num_labels, reuse=tf.AUTO_REUSE, phase_train=True, scope=GEN_A2B_NAME)
                fake_seg  = tf.nn.softmax(fake_seg) - 0.5
                fake_img_ = generator(fake_seg, output_channel=3, reuse=tf.AUTO_REUSE, phase_train=True, scope=GEN_B2A_NAME)
                fake_img_ = tf.nn.tanh(fake_img_)
                fake_img  = generator(source_segments_batch, output_channel=3, reuse=tf.AUTO_REUSE, phase_train=True, scope=GEN_B2A_NAME)
                fake_img  = tf.nn.tanh(fake_img)
                fake_seg_ = generator(fake_img, output_channel=num_labels, reuse=tf.AUTO_REUSE, phase_train=True, scope=GEN_A2B_NAME)
                fake_seg_ = tf.nn.softmax(fake_seg_) - 0.5

            # ---[ Discriminator A & B
            with tf.device('/device:GPU:{}'.format((gid-1) % config['gpus'])):
                d_real_img = discriminator(source_images_batch,   reuse=tf.AUTO_REUSE, phase_train=True, scope=DIS_A_NAME)
                d_fake_img = discriminator(fake_img, reuse=tf.AUTO_REUSE, phase_train=True, scope=DIS_A_NAME)
                d_real_seg = discriminator(source_segments_batch, reuse=tf.AUTO_REUSE, phase_train=True, scope=DIS_B_NAME)
                d_fake_seg = discriminator(fake_seg, reuse=tf.AUTO_REUSE, phase_train=True, scope=DIS_B_NAME)
                #d_fake_img_val = discriminator(fake_img_val, reuse=tf.AUTO_REUSE, phase_train=False, scope=DIS_A_NAME)
                #d_fake_seg_val = discriminator(fake_seg_val, reuse=tf.AUTO_REUSE, phase_train=False, scope=DIS_B_NAME)


                fake_1_segments_output[gid] = fake_seg
                fake_2_segments_output[gid] = fake_seg_
                fake_1_images_output[gid]   = fake_img
                fake_2_images_output[gid]   = fake_img_

                d_real_img_output[gid]      = d_real_img
                d_fake_img_output[gid]      = d_fake_img
                d_real_seg_output[gid]      = d_real_seg
                d_fake_seg_output[gid]      = d_fake_seg

        source_images_batch    = tf.concat(source_images_batches, axis=0)   #-1~1
        source_segments_batch  = tf.concat(source_segments_batches, axis=0) #onehot: -0.5~+0.5
        fake_1_segments_output = tf.concat(fake_1_segments_output, axis=0)  ;   print('fake_1_segments_output', fake_1_segments_output)
        fake_2_segments_output = tf.concat(fake_2_segments_output, axis=0)  ;   print('fake_2_segments_output', fake_2_segments_output)
        fake_1_images_output   = tf.concat(fake_1_images_output  , axis=0)  ;   print('fake_1_images_output  ', fake_1_images_output  )
        fake_2_images_output   = tf.concat(fake_2_images_output  , axis=0)  ;   print('fake_2_images_output  ', fake_2_images_output  )
        d_real_img_output      = tf.concat(d_real_img_output , axis=0)
        d_fake_img_output      = tf.concat(d_fake_img_output , axis=0)
        d_real_seg_output      = tf.concat(d_real_seg_output , axis=0)
        d_fake_seg_output      = tf.concat(d_fake_seg_output , axis=0)

        source_images_batch_color    = (1. + source_images_batch) / 2.
        source_segments_batch_color  = sgtools.decode_labels(tf.cast(convert_to_labels(source_segments_batch + 0.5), tf.int32), num_labels)
        fake_1_segments_output_color = sgtools.decode_labels(tf.cast(convert_to_labels(fake_1_segments_output + 0.5), tf.int32), num_labels)
        fake_2_segments_output_color = sgtools.decode_labels(tf.cast(convert_to_labels(fake_2_segments_output + 0.5), tf.int32), num_labels)
        fake_1_images_output_color   = (1. + fake_1_images_output) / 2.
        fake_2_images_output_color   = (1. + fake_2_images_output) / 2.
        source_data_color = [
            source_images_batch_color,
            source_segments_batch_color,
            fake_1_segments_output_color,
            fake_2_segments_output_color,
            fake_1_images_output_color,
            fake_2_images_output_color,
            ]

        # ---[ Validation Model
        target_data_color_queue = []
        for target_images_batch, target_segments_batch in target_data_queue:
            with tf.device('/device:GPU:{}'.format((2) % config['gpus'])):
                fake_seg  = generator(target_images_batch, output_channel=num_labels, reuse=tf.AUTO_REUSE, phase_train=False, scope=GEN_A2B_NAME)
                fake_seg  = tf.nn.softmax(fake_seg) - 0.5
                fake_img_ = generator(fake_seg, output_channel=3, reuse=tf.AUTO_REUSE, phase_train=False, scope=GEN_B2A_NAME)
                fake_img_ = tf.nn.tanh(fake_img_)
                fake_img  = generator(target_segments_batch, output_channel=3, reuse=tf.AUTO_REUSE, phase_train=False, scope=GEN_B2A_NAME)
                fake_img  = tf.nn.tanh(fake_img)
                fake_seg_ = generator(fake_img, output_channel=num_labels, reuse=tf.AUTO_REUSE, phase_train=False, scope=GEN_A2B_NAME)
                fake_seg_ = tf.nn.softmax(fake_seg_) - 0.5

            target_data_color_queue.append([
                    (1. + target_images_batch) / 2.,                                                                       # target_images_batch_color
                    sgtools.decode_labels(tf.cast(convert_to_labels(target_segments_batch + 0.5), tf.int32), num_labels),  # target_segments_batch_color
                    sgtools.decode_labels(tf.cast(convert_to_labels(fake_seg  + 0.5), tf.int32), num_labels),              # val_fake_1_segments_output_color
                    sgtools.decode_labels(tf.cast(convert_to_labels(fake_seg_ + 0.5), tf.int32), num_labels),              # val_fake_2_segments_output_color
                    (1. + fake_img ) / 2.,                                                                                 # val_fake_1_images_output_color
                    (1. + fake_img_) / 2.,                                                                                 # val_fake_2_images_output_color
                    ])

        # ---[ Segment-level loss: pixelwise loss
        # d_seg_batch = tf.image.resize_nearest_neighbor(seg_gt, tf.shape(_d_real['segment'])[1:3])
        # d_seg_batch = tf.squeeze(d_seg_batch, -1)
        # d_seg_loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=d_seg_batch, logits=_d_real['segment'], name='segment_pixelwise_loss')   # pixel-wise loss
        # d_seg_loss = tf.reduce_mean(d_seg_loss)
        # d_seg_loss = tf.identity(d_seg_loss, name='d_seg_loss')

        # ---[ GAN Loss: crite loss
        #d_loss_old = - (tf.reduce_mean(d_source_output['critic']) - tf.reduce_mean(d_target_output['critic']))
        #g_loss = - (tf.reduce_mean(d_target_output['critic']))
        ## gradient penalty
        #LAMBDA = 10
        ##alpha = tf.placeholder(tf.float32, shape=[None], name='alpha')
        #alpha = tf.random_uniform([config['batch_size']], 0.0, 1.0, dtype=tf.float32)
        #for _ in source_segments_batch.shape[1:]:
            #alpha = tf.expand_dims(alpha, axis=1)   #shape=[None,1,1,1]
        #interpolates = alpha * source_segments_batch + (1.-alpha) * target_segments_output
        #print ('source_segments_batch:', source_segments_batch)
        #print ('target_segments_output:',target_segments_output)
        #print ('interpolates:', interpolates)
        #interpolates = resize_and_onehot(interpolates, target_raw_segments_output.shape.as_list()[1:3], num_labels)
        #print ('interpolates:', interpolates)
        #_d_intp = discriminator(interpolates, reuse=True, phase_train=True, scope=DIS_NAME)
        #intp_grads = tf.gradients(_d_intp['critic'], [interpolates])[0]
        #slopes = tf.sqrt(tf.reduce_sum(tf.square(intp_grads), reduction_indices=[1]))   #L2-distance
        #grads_penalty = tf.reduce_mean(tf.square(slopes-1), name='grads_penalty')
        #d_loss = d_loss_old + LAMBDA * grads_penalty


        def sigmoid_cross_entropy(labels, logits):
            return tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits) )
        def least_square(labels, logits):
            return tf.reduce_mean( (labels - logits) ** 2 )

        if config['loss_mode'] == 'lsgan':
            # ---[ GAN loss: LSGAN loss (chi-square, or called least-square)
            loss_func = least_square
        else:
            # ---[ GAN loss: sigmoid BCE loss
            loss_func = sigmoid_cross_entropy

        # ---[ LOSS
        _img_recovery = config['L1_lambda'] * tf.reduce_mean( tf.abs(source_images_batch - fake_2_images_output))
        #_seg_recovery = config['L1_lambda'] * tf.reduce_mean( tf.abs(source_segments_batch - fake_1_segments_output))   #r1.0: error
        #_seg_recovery = config['L1_lambda'] * tf.reduce_mean( tf.abs(source_segments_batch - fake_2_segments_output))   #r2.0
        _seg_recovery = config['L1_lambda'] * tf.reduce_mean( tf.abs(source_segments_batch_color - fake_2_segments_output_color))    #r2.0.5: not sure because, in theory, no gradient if using decode_labels()


        g_loss_a2b = \
                loss_func( labels=tf.ones_like(d_fake_seg_output), logits=d_fake_seg_output ) + \
                _img_recovery + _seg_recovery
        g_loss_b2a = \
                loss_func( labels=tf.ones_like(d_fake_img_output), logits=d_fake_img_output ) + \
                _img_recovery + _seg_recovery
        g_loss = \
                loss_func( labels=tf.ones_like(d_fake_seg_output), logits=d_fake_seg_output ) + \
                loss_func( labels=tf.ones_like(d_fake_img_output), logits=d_fake_img_output ) + \
                _img_recovery + _seg_recovery

        da_loss = \
                loss_func( labels=tf.ones_like(d_real_img_output), logits=d_real_img_output ) + \
                loss_func( labels=tf.zeros_like(d_fake_img_output), logits=d_fake_img_output )
        db_loss = \
                loss_func( labels=tf.ones_like(d_real_seg_output), logits=d_real_seg_output ) + \
                loss_func( labels=tf.zeros_like(d_fake_seg_output), logits=d_fake_seg_output )
        d_loss = \
                (da_loss + db_loss) / 2.

        # D will output [BATCH_SIZE, 32, 32, 1]
        num_da_real_img_acc = tf.size( tf.where(tf.reduce_mean(tf.nn.sigmoid(d_real_img_output), axis=[1,2,3]) > 0.5)[:,0], name='num_da_real_img_acc' )
        num_da_fake_img_acc = tf.size( tf.where(tf.reduce_mean(tf.nn.sigmoid(d_fake_img_output), axis=[1,2,3]) < 0.5)[:,0], name='num_da_fake_img_acc' )
        num_db_real_seg_acc = tf.size( tf.where(tf.reduce_mean(tf.nn.sigmoid(d_real_seg_output), axis=[1,2,3]) > 0.5)[:,0], name='num_db_real_seg_acc' )
        num_db_fake_seg_acc = tf.size( tf.where(tf.reduce_mean(tf.nn.sigmoid(d_fake_seg_output), axis=[1,2,3]) < 0.5)[:,0], name='num_db_fake_seg_acc' )

        ## limit weights to 0
        #g_weight_regularizer = [0.0001 * tf.nn.l2_loss(v) for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GEN_NAME) if 'weight' in v.name]
        #g_weight_regularizer = tf.add_n(g_weight_regularizer, name='g_weight_regularizer_loss')
        #g_loss += g_weight_regularizer
        #d_weight_regularizer = [0.0001 * tf.nn.l2_loss(v) for v in tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, DIS_NAME) if 'weight' in v.name]
        #d_weight_regularizer = tf.add_n(d_weight_regularizer, name='d_weight_regularizer_loss')
        #d_loss += d_weight_regularizer

        d_loss = tf.identity(d_loss, name='d_loss')
        g_loss = tf.identity(g_loss, name='g_loss')

        ## --- Training Set Validation ---
        # Predictions.
        #pred_gt = tf.reshape(target_segments_batch, [-1,])
        #pred    = tf.reshape(target_segments_output, [-1,])
        #indices = tf.squeeze(tf.where(tf.not_equal(pred_gt, ignore_label)), 1)
        #pred_gt = tf.cast(tf.gather(pred_gt, indices), tf.int32)
        #pred    = tf.cast(tf.gather(pred, indices), tf.int32)
        ## mIoU
        ### Allowing to use indices matrices in mean_iou() with `num_classes=indices.max()`
        #weights = tf.cast(tf.less_equal(pred_gt, num_labels), tf.int32) # Ignoring all labels greater than or equal to n_classes.
        #mIoU, mIoU_update_op = tf.metrics.mean_iou(pred, pred_gt, num_classes=num_labels, weights=weights)

        # ---[ Variables
        g_a2b_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GEN_A2B_NAME)
        g_b2a_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, GEN_B2A_NAME)
        d_a_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, DIS_A_NAME)
        d_b_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, DIS_B_NAME)
        g_vars = g_a2b_vars + g_b2a_vars
        d_vars = d_a_vars + d_b_vars

        print_list(g_a2b_vars, GEN_A2B_NAME)
        print_list(g_b2a_vars, GEN_B2A_NAME)
        print_list(d_a_vars, DIS_A_NAME)
        print_list(d_b_vars, DIS_B_NAME)

        # ---[ Optimizer
        ## `colocate_gradients_with_ops = True` reduces GPU memory use and speeds up training
        OPT_NAME = 'Optimizer'
        g_opts = []; d_opts = []
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            with tf.variable_scope(OPT_NAME):
                #with tf.device('/device:GPU:{}'.format(config['gpus']-1)):
                if True:
                    if len(g_vars) > 0:
                        g_opt = tf.train.AdamOptimizer(learning_rate=config['g_lr'], beta1=0.5, beta2=0.9).minimize(g_loss,
                            var_list=g_vars, colocate_gradients_with_ops=True)
                        g_opts.append(g_opt)
                    if len(d_vars) > 0:
                        d_opt = tf.train.AdamOptimizer(learning_rate=config['d_lr'], beta1=0.5, beta2=0.9).minimize(d_loss,
                            var_list=d_vars, colocate_gradients_with_ops=True)
                        d_opts.append(d_opt)

        g_opt = tf.group(*g_opts)
        d_opt = tf.group(*d_opts)
        opt_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, OPT_NAME)
        print_list(opt_vars, OPT_NAME)

        # --- [ Summary
        scalars   = [d_loss, g_loss]
        #scalars  += [mIoU]
        scalars  += [num_da_real_img_acc, num_da_fake_img_acc, num_db_real_seg_acc, num_db_fake_seg_acc]
        scalars  += [g_loss_a2b, g_loss_b2a, da_loss, db_loss]
        writer, summarys = create_summary(summary_dir=config['summary_dir'], name=config['suffix'],
                scalar = scalars,
                )

        '''
        Training
        '''
        with tf.Session(config=GpuConfig) as sess:
            sess.run(tf.global_variables_initializer()) # do NOT put this after ``saver.restore``
            sess.run(tf.local_variables_initializer())  # do NOT put this after ``saver.restore``
            saver = tf.train.Saver(g_vars + d_vars, max_to_keep=1)
            #g_saver = tf.train.Saver(g_vars, max_to_keep=1)
            #d_saver = tf.train.Saver(d_vars, max_to_keep=1)
            #if self.ckpt:
                #saver.restore(sess, self.ckpt)
                #print ("Training starts at %d iteration..." % sess.run(global_step))

            feeds = {}

            # Start queue threads.
            threads = tf.train.start_queue_runners(coord=coord, sess=sess)

            inside_epoch  = int(config['print_epoch']) if config['print_epoch'] < config['max_epoch'] else int(config['max_epoch'] / 1)
            outside_epoch = int(config['max_epoch'] / inside_epoch)
            start = int(sess.run(global_step) / inside_epoch)
            if start >= outside_epoch:
                raise ValueError("initial iteration:%d >= max iteration:%d. please reset '--max_epoch' value." % (sess.run(global_step), config['max_epoch']))

            start_time = time.time()
            for epo in range(start, outside_epoch):
                bar = IncrementalBar('[epoch {:<4d}/{:<4d}]'.format(epo, outside_epoch), max=inside_epoch)
                for epi in range(inside_epoch):
                    iters = sess.run(global_step)
                    # save summary
                    if epo == 0:
                        save_summarys = sess.run(summarys, feed_dict=feeds)
                        writer.add_summary(save_summarys, iters)

                    for _ in range(config['d_epoch']):
                        sess.run(d_opt, feed_dict=feeds)

                    if iters > self.pretrain_D_epoch:
                        for _ in range(config['g_epoch']):
                            sess.run(g_opt, feed_dict=feeds)

                    sess.run(global_step_update)
                    bar.next()

                duration = time.time() - start_time
                disc_loss, gen_loss = \
                        sess.run([d_loss, g_loss], feed_dict=feeds)
                na_real, na_fake, nb_real, nb_fake = \
                        sess.run([num_da_real_img_acc, num_da_fake_img_acc, num_db_real_seg_acc, num_db_fake_seg_acc], feed_dict=feeds)

                #sess.run(mIoU_update_op, feed_dict=feeds)
                #miou = sess.run(mIoU, feed_dict=feeds)
                print (' -',
                        'DLoss: %-8.2e' % disc_loss,
                        #'(W: %-8.2e)' % disc_wloss,
                        'GLoss: %-8.2e' % gen_loss,
                        #'(W: %-8.2e)' % gen_wloss,
                        '|',
                        '[Da_img] #real: %d, #fake: %d' % (na_real, na_fake),
                        '[Db_seg] #real: %d, #fake: %d' % (nb_real, nb_fake),
                        '|',
                        #'[train_mIoU] %.2f' % miou,
                        '[Elapsed] %s' % format_time(duration)
                        )
                bar.finish()

                iters = sess.run(global_step)
                # save checkpoint
                if epo % 2 == 0:
                    saver_path = os.path.join(config['ckpt_dir'], '{}.ckpt'.format(config['name']))
                    saver.save(sess, save_path=saver_path, global_step=global_step)
                # save summary
                if epo % 1 == 0:
                    save_summarys = sess.run(summarys, feed_dict=feeds)
                    writer.add_summary(save_summarys, iters)
                # output samples
                if epo % 5 == 0:
                    img_gt, seg_gt, seg_1, seg_2, img_1, img_2 = sess.run(source_data_color)
                    print ("Range %10s:" % "seg_gt", seg_gt.min(), seg_gt.max())
                    print ("Range %10s:" % "seg_1", seg_1.min(), seg_1.max())
                    print ("Range %10s:" % "seg_2", seg_2.min(), seg_2.max())
                    print ("Range %10s:" % "img_gt", img_gt.min(), img_gt.max())
                    print ("Range %10s:" % "img_1", img_1.min(), img_1.max())
                    print ("Range %10s:" % "img_2", img_2.min(), img_2.max())
                    _output = np.concatenate([img_gt, seg_gt, seg_1, img_1, img_2, seg_2], axis=0)
                    save_visualization(_output, save_path=os.path.join(config['result_dir'], 'tr-{}.jpg'.format(iters)), size=[3, 2*config['batch_size']])
                    #seg_output = np.concatenate([seg_gt, seg_2, seg_1], axis=0)
                    #img_output = np.concatenate([img_gt, img_2, img_1], axis=0)
                    #save_visualization(seg_output, save_path=os.path.join(config['result_dir'], 'tr-seg-1gt_2mapback_3map-{}.jpg'.format(iters)), size=[3, config['batch_size']])
                    #save_visualization(img_output, save_path=os.path.join(config['result_dir'], 'tr-img-1gt_2mapback_3map-{}.jpg'.format(iters)), size=[3, config['batch_size']])
                    for i,target_data_color in enumerate(target_data_color_queue):
                        val_img_gt, val_seg_gt, val_seg_1, val_seg_2, val_img_1, val_img_2 = sess.run(target_data_color)
                        print ("Val Range %10s:" % "seg_gt", val_seg_gt.min(), val_seg_gt.max())
                        print ("Val Range %10s:" % "seg_1", val_seg_1.min(), val_seg_1.max())
                        print ("Val Range %10s:" % "seg_2", val_seg_2.min(), val_seg_2.max())
                        print ("Val Range %10s:" % "img_gt", val_img_gt.min(), val_img_gt.max())
                        print ("Val Range %10s:" % "img_1", val_img_1.min(), val_img_1.max())
                        print ("Val Range %10s:" % "img_2", val_img_2.min(), val_img_2.max())
                        _output = np.concatenate([val_img_gt, val_seg_gt, val_seg_1, val_img_1, val_img_2, val_seg_2], axis=0)
                        save_visualization(_output, save_path=os.path.join(config['result_dir'], 'val{}-{}.jpg'.format(i,iters)), size=[3, 2*config['batch_size']])
                        #val_seg_output = np.concatenate([val_seg_gt, val_seg_2, val_seg_1], axis=0)
                        #val_img_output = np.concatenate([val_img_gt, val_img_2, val_img_1], axis=0)
                        #save_visualization(seg_output, save_path=os.path.join(config['result_dir'], 'val{}-seg-1gt_2mapback_3map-{}.jpg'.format(i,iters)), size=[3, config['batch_size']])
                        #save_visualization(img_output, save_path=os.path.join(config['result_dir'], 'val{}-img-1gt_2mapback_3map-{}.jpg'.format(i,iters)), size=[3, config['batch_size']])

                writer.flush()
            writer.close()
Example #7
# Model variables.
weights = slim.model_variable('weights',
                              shape=[10, 10, 3, 3],
                              initializer=tf.truncated_normal_initializer(stddev=0.1),
                              regularizer=slim.l2_regularizer(0.05),
                              device='/CPU:0')
model_variables = slim.get_model_variables()

# Regular variables.
my_var = slim.variable('my_var',
                       shape=[20, 1],
                       initializer=tf.zeros_initializer())
regular_variables_and_model_variables = slim.get_variables()

"""
my_model_variable = CreateViaCustomCode()

# Letting TF-Slim know about the additional variable.
slim.add_model_variable(my_model_variable)
"""

#--------------------------------------------------------------------
# Layers.

input_shape = (None, 224, 224, 3)
input_tensor = tf.placeholder(tf.float32, shape=input_shape)

"""
net = slim.conv2d(input_tensor, 256, [3, 3], scope='conv3_1')
net = slim.conv2d(net, 256, [3, 3], scope='conv3_2')
Example #8
import tensorflow as tf
import tensorflow.contrib.slim as slim

# Model variables
weights = slim.model_variable('weights',
                              shape=[10, 10, 3, 3],
                              initializer=tf.truncated_normal_initializer(stddev=0.1),
                              regularizer=slim.l2_regularizer(0.05),
                              device='/CPU:0')
model_variables = slim.get_model_variables()

# Regular variables
my_var = slim.variable('my_var',
                       shape=[20, 1],
                       initializer=tf.zeros_initializer())
regular_variables_and_model_variables = slim.get_variables()

var_aa = tf.get_variable("aaaVar", shape=[10, 20])

my_model_variable = var_aa

# Letting TF-Slim know about the additional variable.
slim.add_model_variable(my_model_variable)

inputs = tf.get_variable("input_var", shape=[1, 24, 24, 3], dtype=tf.float32)

net = slim.conv2d(inputs, 128, [3, 3], scope='conv1_1')
print(net)

# slim.repeat applies the same layer repeatedly, e.g. two more 3x3 convolutions:
net = slim.repeat(net, 2, slim.conv2d, 128, [3, 3], scope='conv2')


def vgg16(inputs):
    with slim.arg_scope([slim.conv2d, slim.fully_connected],
                        activation_fn=tf.nn.relu,
                        weights_initializer=tf.truncated_normal_initializer(
                            0.0, 0.01),
                        weights_regularizer=slim.l2_regularizer(0.0005)):
        net = slim.repeat(inputs, 2, slim.conv2d, 64, [3, 3], scope='conv1')
        net = slim.max_pool2d(net, [2, 2], scope='pool1')
        return net  # remaining VGG-16 layers omitted
Example #9
def seqvlad(net, videos_per_batch, weight_decay, netvlad_initCenters):
    # Sequential VLAD pooling
    end_points = {}
    try:
      print('net shape():', net.get_shape().as_list())
      netvlad_initCenters = int(netvlad_initCenters)
      # initialize the cluster centers randomly
      cluster_centers = np.random.normal(size=(
        netvlad_initCenters, net.get_shape().as_list()[-1]))
      logging.info('Randomly initializing the {} netvlad cluster '
                   'centers'.format(cluster_centers.shape))
    except ValueError:
      print('<netvlad_initCenters> must be an integer for <seqvlad> pooling types ...')
      exit()
    with tf.variable_scope('NetVLAD'):
        # normalize features
        net_normed = tf.nn.l2_normalize(net, 3, name='FeatureNorm')
        end_points[tf.get_variable_scope().name + '/net_normed'] = net_normed

        #net_relu = tf.nn.relu(net)
        #end_points[tf.get_variable_scope().name+'/net_relu']=net_relu

        # model parameters
        centers_num = 64
        input_shape = net.get_shape().as_list()
        # initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(input_shape[-1])) # yanbin liu
        
        
        # share_w
        share_w = tf.get_variable('vlad_W',
                              shape=[1, 1, input_shape[-1], centers_num], #[filter_height, filter_width, in_channels, out_channels]
                              initializer=tf.contrib.layers.xavier_initializer(),
                              
                              regularizer=slim.l2_regularizer(weight_decay),
                              )
        share_b = tf.get_variable('vlad_B',
                              shape=[centers_num],
                              initializer=tf.truncated_normal_initializer(stddev=0.1),
                              regularizer=slim.l2_regularizer(weight_decay))

        centers = tf.get_variable('centers',
                              shape=[1,input_shape[-1],centers_num],
                              initializer=tf.truncated_normal_initializer(stddev=0.1),#tf.constant_initializer(cluster_centers),
                              regularizer=slim.l2_regularizer(weight_decay),
                              )

        U_z = tf.get_variable('U_z',
                              shape=[3, 3, centers_num, centers_num], #[filter_height, filter_width, in_channels, out_channels]
                              initializer=tf.contrib.layers.xavier_initializer(),
                              regularizer=slim.l2_regularizer(weight_decay),
                              )
        U_r = tf.get_variable('U_r',
                              shape=[3, 3, centers_num, centers_num], #[filter_height, filter_width, in_channels, out_channels]
                              initializer=tf.contrib.layers.xavier_initializer(),
                              regularizer=slim.l2_regularizer(weight_decay),
                              )
        U_h = tf.get_variable('U_h',
                              shape=[3, 3, centers_num, centers_num], #[filter_height, filter_width, in_channels, out_channels]
                              initializer=tf.contrib.layers.xavier_initializer(),
                              regularizer=slim.l2_regularizer(weight_decay),
                              )
        slim.add_model_variable(share_w)
        slim.add_model_variable(share_b)
        slim.add_model_variable(centers)
        slim.add_model_variable(U_z)
        slim.add_model_variable(U_r)
        slim.add_model_variable(U_h)
        
        centers = tf.reshape(centers, [1, input_shape[-1], centers_num])



        # add parameters to end_poins
        end_points[tf.get_variable_scope().name + '/vlad_W'] = share_w
        end_points[tf.get_variable_scope().name + '/vlad_B'] = share_b
        end_points[tf.get_variable_scope().name + '/centers'] = centers
        end_points[tf.get_variable_scope().name + '/U_z'] = U_z
        end_points[tf.get_variable_scope().name + '/U_r'] = U_r
        end_points[tf.get_variable_scope().name + '/U_h'] = U_h

        # seqvlad 
        input_shape = net.get_shape().as_list()
        timesteps = input_shape[0]//videos_per_batch
        print("################## timesteps", timesteps)
	
        assert input_shape[0]%videos_per_batch==0
        # assignment = tf.reshape(net,[videos_per_batch, -1, input_shape[]])
        w_conv_x = tf.nn.conv2d(net, share_w, [1,1,1,1], 'SAME', name='w_conv_x')
        assignments = tf.nn.bias_add(w_convx, share_b)

        assignments = tf.reshape(assignments, [videos_per_batch, -1, input_shape[1], input_shape[2], centers_num])
        print('assignments', assignments.get_shape().as_list())


        axis = [1,0]+list(range(2,5))  # i.e. [1,0,2,3,4]: move the time axis to the front
        assignments = tf.transpose(assignments, perm=axis)

        input_assignments = tf.TensorArray(
                dtype=assignments.dtype,
                size=timesteps,
                tensor_array_name='input_assignments')
        if hasattr(input_assignments, 'unstack'):
          input_assignments = input_assignments.unstack(assignments)
        else:
          input_assignments = input_assignments.unpack(assignments)  

        hidden_states = tf.TensorArray(
                dtype=tf.float32,
                size=timesteps,
                tensor_array_name='hidden_states')

        def get_init_state(x, output_dims):

          initial_state = tf.zeros_like(x)
          initial_state = tf.reduce_sum(initial_state,axis=[0,4])
          initial_state = tf.expand_dims(initial_state, axis=-1)
          initial_state = tf.tile(initial_state,[1,1,1,output_dims])
          return initial_state

        def step(time, hidden_states, h_tm1):
          assign_t = input_assignments.read(time) # batch_size * dim
          print('h_tm1', h_tm1.get_shape().as_list())
          r = tf.nn.sigmoid(tf.add(assign_t, tf.nn.conv2d(h_tm1, U_r, [1,1,1,1], 'SAME', name='r')))
          z = tf.nn.sigmoid(tf.add(assign_t, tf.nn.conv2d(h_tm1, U_z, [1,1,1,1], 'SAME', name='z')))

          hh = tf.tanh(tf.add(assign_t, tf.nn.conv2d(r*h_tm1, U_h,  [1,1,1,1], 'SAME', name='hh')))

          h = (1-z)*hh + z*h_tm1
        
          hidden_states = hidden_states.write(time, h)

          return (time+1,hidden_states, h)

        time = tf.constant(0, dtype='int32', name='time')
        print('assignments', assignments.get_shape().as_list())
        initial_state = get_init_state(assignments,centers_num)
        print('initial_state', initial_state.get_shape().as_list())
        timesteps = tf.constant(timesteps, dtype='int32', name='timesteps')
        feature_out = tf.while_loop(
                cond=lambda time, *_: time < timesteps,
                body=step,
                loop_vars=(time, hidden_states, initial_state ),
                parallel_iterations=32,
                swap_memory=True)


        recur_assigns = feature_out[-2]
        if hasattr(hidden_states, 'stack'):
          recur_assigns = recur_assigns.stack()
        else:
          recur_assigns = recur_assigns.pack()

        
        
        axis = [1,0]+list(range(2,5))  # i.e. [1,0,2,3,4]: move the batch axis back to the front
        recur_assigns = tf.transpose(recur_assigns, perm=axis)


        recur_assigns = tf.reshape(recur_assigns,[-1,input_shape[1]*input_shape[2],centers_num])

        # assignment = tf.nn.softmax(assignment,dim=-1)

        # for alpha * c
        a_sum = tf.reduce_sum(recur_assigns,-2,keep_dims=True)
        a = tf.multiply(a_sum,centers)
        # for alpha * x
        recur_assigns = tf.transpose(recur_assigns,perm=[0,2,1])
        net = tf.reshape(net,[-1,input_shape[1]*input_shape[2],input_shape[3]])
        vlad = tf.matmul(recur_assigns,net)
        vlad = tf.transpose(vlad, perm=[0,2,1])

        # for difference
        vlad = tf.subtract(vlad,a)

        vlad = tf.reshape(vlad,[videos_per_batch, -1, input_shape[3], centers_num])
        vlad_rep = tf.reduce_sum(vlad, axis=1)

        end_points[tf.get_variable_scope().name + '/unnormed_vlad'] = vlad_rep
        with tf.name_scope('intranorm'):
          vlad_rep = tf.nn.l2_normalize(vlad_rep, 1)
        end_points[tf.get_variable_scope().name + '/intranormed_vlad'] = vlad_rep
        with tf.name_scope('finalnorm'):
          vlad_rep = tf.reshape(vlad_rep,[-1, input_shape[3]*centers_num])
          vlad_rep = tf.nn.l2_normalize(vlad_rep,-1)


    return vlad_rep, end_points
Example #10

# slim.model_variable defines a variable that represents a model parameter; non-model variables are variables needed during training or evaluation but not during inference (e.g. the global step)
weights_model_var = slim.model_variable(
    'weights',
    shape=[10, 10, 3, 3],
    initializer=tf.truncated_normal_initializer(stddev=0.1),
    regularizer=slim.l2_regularizer(0.05),
    device='/CPU:0')
model_variables = slim.get_model_variables()

# Define and retrieve a regular variable
my_var = slim.variable("my_var",
                       shape=[20, 1],
                       initializer=tf.zeros_initializer())
regular_variables_and_model_variables = slim.get_variables()

# slim.model_variable adds the variable to the tf.GraphKeys.MODEL_VARIABLES collection; custom layers or variables can also be added to the corresponding collection by hand
my_custom_var = tf.Variable(1, name="my_custom_var", dtype=tf.int8)
slim.add_model_variable(my_custom_var)

# Define a convolutional layer
inputs = slim.variable("input_var", shape=[1, 28, 28, 3])
net = slim.conv2d(
    inputs,
    128, [3, 3],
    weights_initializer=tf.truncated_normal_initializer(stddev=0.01),
    weights_regularizer=slim.l2_regularizer(0.0005),
    scope="conv1_1")
Example #11
import tensorflow.contrib.slim as slim
import tensorflow as tf

weight = tf.Variable(tf.ones([2, 3]))
slim.add_model_variable(weight)
model_variables = slim.get_model_variables()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(model_variables))