Code example #1
  def _test_image_producer(self, batch_group_size, put_slower_than_get):
    # We use the variable x to simulate a staging area of images. x represents
    # the number of batches in the staging area.
    x = tf.Variable(0, dtype=tf.int32)
    if put_slower_than_get:
      put_dep = self._slow_tensorflow_op()
      get_dep = tf.no_op()
    else:
      put_dep = tf.no_op()
      get_dep = self._slow_tensorflow_op()
    with tf.control_dependencies([put_dep]):
      put_op = x.assign_add(batch_group_size, use_locking=True)
    with tf.control_dependencies([get_dep]):
      get_op = x.assign_sub(1, use_locking=True)
    with self.test_session() as sess:
      sess.run(tf.variables_initializer([x]))
      image_producer = cnn_util.ImageProducer(sess, put_op, batch_group_size,
                                              use_python32_barrier=False)
      image_producer.start()
      for _ in range(5 * batch_group_size):
        sess.run(get_op)
        # We assert x is nonnegative, to ensure image_producer never causes
        # an unstage op to block. We assert x is at most 2 * batch_group_size,
        # to ensure it doesn't use too much memory by storing too many batches
        # in the staging area.
        self.assertGreaterEqual(sess.run(x), 0)
        self.assertLessEqual(sess.run(x), 2 * batch_group_size)
        image_producer.notify_image_consumption()
        self.assertGreaterEqual(sess.run(x), 0)
        self.assertLessEqual(sess.run(x), 2 * batch_group_size)

      image_producer.done()
      time.sleep(0.1)
      self.assertGreaterEqual(sess.run(x), 0)
      self.assertLessEqual(sess.run(x), 2 * batch_group_size)
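The test above relies on a _slow_tensorflow_op helper defined elsewhere in the test class. A minimal sketch of one possible implementation, assuming a tf.py_func that wraps a short time.sleep (the 0.1-second delay is an assumption, not taken from the original test):

  def _slow_tensorflow_op(self):
    """Returns a TF op that takes roughly 0.1s to run (illustrative only)."""
    def slow_func(v):
      time.sleep(0.1)
      return v
    return tf.py_func(slow_func, [tf.constant(0.)], tf.float32).op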
Code example #2
def model_fn(features, labels, mode):
    models = []
    logits = []
    classes = []
    init_op = [tf.train.get_or_create_global_step().initializer]
    for (i, model_name) in enumerate(FLAGS.model_name.split(',')):
        with tf.device("/gpu:%d" % i):
            network_fn = getattr(nets, model_name)
            models.append(network_fn(features, is_training=False))
            logits.append(models[i].get_outputs()[-2])
            classes.append(tf.argmax(logits[i], axis=1))
            if FLAGS.checkpoint_path is None:
                init_op.extend(models[i].pretrained())

    scaffold = None
    if FLAGS.checkpoint_path is None:
        scaffold = tf.train.Scaffold(init_op=init_op)

    loss = []
    for i in range(len(models)):
        cross_entropy = tf.losses.sparse_softmax_cross_entropy(
            logits=logits[i], labels=labels)
        loss.append(cross_entropy)
    loss = tf.reduce_sum(loss)

    metrics = None
    if mode == tf.estimator.ModeKeys.EVAL:
        metrics = {}
        for i in range(len(models)):
            top1 = tf.metrics.accuracy(labels=labels, predictions=classes[i])
            top5 = contrib.metrics.streaming_sparse_recall_at_k(logits[i],
                                                                tf.cast(
                                                                    labels,
                                                                    tf.int64),
                                                                k=5)
            size = sum(
                [w.shape.num_elements() for w in models[i].get_weights()])
            run_meta = tf.RunMetadata()
            opts = tf.profiler.ProfileOptionBuilder.float_operation()
            opts['output'] = 'none'
            flops = tf.profiler.profile(tf.get_default_graph(),
                                        run_meta=run_meta,
                                        options=opts)
            metrics.update({
                "%dTop1" % i: top1,
                "%dTop5" % i: top5,
                "%dMAC" % i: (tf.constant(flops.total_float_ops), tf.no_op()),
                "%dSize" % i: (tf.constant(size), tf.no_op())
            })

    return tf.estimator.EstimatorSpec(mode=mode,
                                      scaffold=scaffold,
                                      predictions=None,
                                      loss=loss,
                                      train_op=None,
                                      eval_metric_ops=metrics,
                                      export_outputs=None)
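A minimal sketch of handing this model_fn to an Estimator for evaluation. The random input data, batch size, and model_dir below are placeholders; the real example additionally depends on the project's FLAGS and nets modules:

import numpy as np

def eval_input_fn():
    # Hypothetical random data standing in for a real evaluation set.
    features = np.random.rand(64, 224, 224, 3).astype(np.float32)
    labels = np.random.randint(0, 1000, size=(64,)).astype(np.int64)
    return tf.data.Dataset.from_tensor_slices((features, labels)).batch(32)

estimator = tf.estimator.Estimator(model_fn=model_fn, model_dir='/tmp/ensemble_eval')
print(estimator.evaluate(input_fn=eval_input_fn))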
Code example #3
    def train_operation(self, loss, variables):
        """
        Training operation

        Parameters
        ----------
        loss : tensor
            current loss of the model
        variables : list
            list of the variables that should be trained

        Returns
        -------
        tensorflow operation
            the training operation

        """
        if len(variables) > 0:
            gradients = self.optimizer.get_gradients(loss, variables)
            grads = list(zip(gradients, variables))

            clipped_grads = []
            # Add histograms for gradients.
            for grad, var in grads:
                if grad is not None:
                    # try catching nans
                    grad = tf.where(tf.math.is_finite(grad), grad,
                                    tf.zeros_like(grad))

                    # limit the maximum change to any variable to 0.5
                    grad = tf.clip_by_value(grad, -0.5/self.initial_lr,
                                            0.5/self.initial_lr)

                    clipped_grads += [(grad, var)]
                    tf.summary.histogram(var.op.name + '/gradients', grad)
                    # safe plotting
                    var_plot = tf.where(tf.math.is_finite(var), var,
                                        tf.zeros_like(var))
                    tf.summary.histogram(var.op.name, var_plot)

            # process the updates of the batch norm layers
            # update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
            update_ops = []
            for lay in self.context.update_ops:
                update_ops += lay.updates
            if len(clipped_grads) > 0:
                apply_gradient_op = \
                    self.optimizer.apply_gradients(clipped_grads)

                with tf.control_dependencies([apply_gradient_op] + update_ops):
                    train_parameters_op = tf.no_op(name='train')
            else:
                with tf.control_dependencies(update_ops):
                    train_parameters_op = tf.no_op(name='train')
            return train_parameters_op
        else:
            return tf.no_op(name='train')
Code example #4
 def old_old_weight_update_op(self):
     with tf.name_scope(self._spec.name):
         if self._spec.prune_option != 'second_order_gradient':
             return tf.no_op('gradient_update_no_op')
         if not self._assign_old_old_weight_ops:
             self._get_assign_old_old_weight_ops()
         with tf.control_dependencies(self._assign_old_old_weight_ops):
             tf.logging.info('Updating old old weights.')
             return tf.no_op('old_old_weight_update')
Code example #5
 def gradient_update_op(self):
     with tf.name_scope(self._spec.name):
         if self._spec.prune_option not in ('first_order_gradient',
                                            'second_order_gradient'):
             return tf.no_op('gradient_update_no_op')
         if not self._assign_gradient_ops:
             self._get_assign_gradient_ops()
         with tf.control_dependencies([
                 tf.assign(self._last_gradient_update_step,
                           self._global_step,
                           name='last_gradient_update_step_assign')
         ]):
             with tf.control_dependencies(self._assign_gradient_ops):
                 tf.logging.info('Updating gradients.')
                 return tf.no_op('gradient_update')
Code example #6
File: datasets.py Project: kokizzu/google-research
 def randomize_op(self):
     """Randomizes the augmentation according to the `random_config`."""
     if self.children:
         return tf.group([child.randomize_op() for child in self.children])
     if self.random_config is None:
         return tf.no_op()
     config = self.random_config
     assign_rotate = self.rotate.assign_bool(config.rotation_probability)
     assign_smooth = self.smooth.assign_bool(config.smooth_probability)
     assign_contrast = self.contrast.assign_bool(
         config.contrast_probability)
     assign_negate = self.negate.assign_bool(config.negate_probability)
     assign_resize = self.resize.assign_bool(config.resize_probability)
     assign_roll = self.roll.assign_bool(config.roll_probability)
     angle_range = config.angle_range / 180.0 * np.pi
     assign_angle = self.angle.assign_uniform(scale=angle_range)
     assign_size = self.size.assign_uniform(scale=1.0)
     assign_alpha = self.alpha.assign_uniform(scale=1.0)
     assign_roll_x = self.roll_x.assign_uniform(scale=config.roll_range)
     assign_roll_y = self.roll_y.assign_uniform(scale=config.roll_range)
     assign_rotate_90 = self.rotate_90_times.assign_uniform(scale=2.0)
     return tf.group(assign_rotate, assign_smooth, assign_contrast,
                     assign_angle, assign_negate, assign_resize,
                     assign_alpha, assign_size, assign_roll, assign_roll_x,
                     assign_roll_y, assign_rotate_90)
Code example #7
    def _add_train_graph(self):
        """Define the training operation."""
        mc = self.mc

        self.global_step = tf.Variable(0, name='global_step', trainable=False)
        lr = tf.train.exponential_decay(mc.LEARNING_RATE,
                                        self.global_step,
                                        mc.DECAY_STEPS,
                                        mc.LR_DECAY_FACTOR,
                                        staircase=True)

        tf.summary.scalar('learning_rate', lr)

        _add_loss_summaries(self.loss)

        opt = tf.train.MomentumOptimizer(learning_rate=lr,
                                         momentum=mc.MOMENTUM)
        grads_vars = opt.compute_gradients(self.loss, tf.trainable_variables())

        with tf.variable_scope('clip_gradient') as scope:
            for i, (grad, var) in enumerate(grads_vars):
                grads_vars[i] = (tf.clip_by_norm(grad, mc.MAX_GRAD_NORM), var)

        apply_gradient_op = opt.apply_gradients(grads_vars,
                                                global_step=self.global_step)

        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)

        for grad, var in grads_vars:
            if grad is not None:
                tf.summary.histogram(var.op.name + '/gradients', grad)

        with tf.control_dependencies([apply_gradient_op]):
            self.train_op = tf.no_op(name='train')
Code example #8
def LogAndSummarizeMetrics(metrics, use_streaming_mean=True):
    """Logs and summarizes metrics.

  Metrics are added to the LOGGING_OUTPUTS collection.

  Args:
    metrics: A dictionary of scalar metrics.
    use_streaming_mean: If true, the metrics will be averaged using a running
      mean.

  Returns:
    If use_streaming_mean is true, then this will be the op that you need to
    regularly call to update the running mean. Otherwise, this is a no-op.
  """

    prefix = tf.get_default_graph().get_name_scope()
    if prefix:
        prefix += "/"
    logging_collection = tf.get_collection_ref(LOGGING_OUTPUTS)

    update_ops = [tf.no_op()]
    for name, value in metrics.items():
        if use_streaming_mean:
            value, update_op = tf.metrics.mean(value)
            update_ops.append(update_op)
        logging_collection.append((prefix + name, value))
        tf.summary.scalar(name, value)

    return tf.group(*update_ops)
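A minimal usage sketch for LogAndSummarizeMetrics, assuming the module-level LOGGING_OUTPUTS collection name it references is available; the tiny regression model and the random data are illustrative only:

import numpy as np

x = tf.placeholder(tf.float32, [None, 1])
y = tf.placeholder(tf.float32, [None, 1])
pred = tf.layers.dense(x, 1)
loss = tf.losses.mean_squared_error(y, pred)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)

# Returns the op that refreshes the running means; run it once per step.
metrics_update_op = LogAndSummarizeMetrics({'loss': loss})

with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(), tf.local_variables_initializer()])
    for _ in range(10):
        batch = np.random.rand(8, 1).astype(np.float32)
        sess.run([train_op, metrics_update_op],
                 feed_dict={x: batch, y: 2.0 * batch})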
Code example #9
File: model.py Project: KotoriCANOE/DeblurNet
 def build_g_loss(self, labels, outputs):
     self.g_log_losses = []
     update_ops = []
     loss_key = 'GeneratorLoss'
     with tf.variable_scope(loss_key):
         # L1 loss
         l1_loss = tf.losses.absolute_difference(labels, outputs, 1.0,
             loss_collection=None)
         tf.losses.add_loss(l1_loss)
         update_ops.append(self.loss_summary('l1_loss', l1_loss, self.g_log_losses))
         # SSIM loss
         labelsY = layers.RGB2Y(labels, self.data_format)
         outputsY = layers.RGB2Y(outputs, self.data_format)
         ssim_loss = 1 - layers.MS_SSIM2(labelsY, outputsY, sigma=[1.5, 4.0, 10.0],
             L=1, norm=False, data_format=self.data_format)
         tf.losses.add_loss(ssim_loss * 0.1)
         update_ops.append(self.loss_summary('ssim_loss', ssim_loss, self.g_log_losses))
         # regularization loss
         reg_losses = tf.losses.get_regularization_losses('Generator')
         reg_loss = tf.add_n(reg_losses)
         # tf.losses.add_loss(reg_loss)
         update_ops.append(self.loss_summary('reg_loss', reg_loss))
         # final loss
         losses = tf.losses.get_losses(loss_key)
         self.g_loss = tf.add_n(losses, 'total_loss')
         update_ops.append(self.loss_summary('loss', self.g_loss))
         # accumulate operator
         with tf.control_dependencies(update_ops):
             self.g_losses_acc = tf.no_op('accumulator')
Code example #10
def backward():
    x = tf.placeholder(tf.float32,
                       shape=[
                           BATCH_SIZE, lenet5_forward.IMAGE_SIZE,
                           lenet5_forward.IMAGE_SIZE,
                           lenet5_forward.NUM_CHANNELS
                       ])
    y_ = tf.placeholder(tf.float32, [None, lenet5_forward.OUTPUT_NODE])
    y = lenet5_forward.forward(x, True, REGULARIZER)
    global_step = tf.Variable(0, trainable=False)

    ce = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=y, labels=tf.argmax(y_, 1))
    cem = tf.reduce_mean(ce)
    loss = cem + tf.add_n(tf.get_collection('losses'))
    learning_rate = tf.train.exponential_decay(LEARNING_RATE_BASE,
                                               global_step,
                                               train_num_examples / BATCH_SIZE,
                                               LEARNING_RETE_DECAY,
                                               staircase=True)
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(
        loss, global_step=global_step)

    ema = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY, global_step)
    ema_op = ema.apply(tf.trainable_variables())
    with tf.control_dependencies([train_step, ema_op]):
        train_op = tf.no_op(name="train")

    saver = tf.train.Saver()
    img_batch, label_batch = lenet5_generateds.get_tfRecord(BATCH_SIZE, True)

    with tf.Session() as sess:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)

        ckpt = tf.train.get_checkpoint_state(MODEL_SAVE_PATH)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)

        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)

        for i in range(STEPS):
            xs, ys = sess.run([img_batch, label_batch])
            _, loss_val, step = sess.run([train_op, loss, global_step],
                                         feed_dict={
                                             x: xs,
                                             y_: ys
                                         })
            if i % 100 == 0:
                print(
                    "After %d training step(s), loss on training batch is %g."
                    % (step, loss_val))
                saver.save(sess,
                           os.path.join(MODEL_SAVE_PATH, MODEL_NAME),
                           global_step=global_step)

        coord.request_stop()
        coord.join(threads)
Code example #11
def batch_norm(x, is_training, bn_decay):
    input_dims = x.get_shape()[-1].value
    moment_dims = list(range(len(x.get_shape()) - 1))
    beta = tf.Variable(tf.zeros_initializer()(shape=[input_dims]),
                       dtype=tf.float32,
                       trainable=True,
                       name='beta')
    gamma = tf.Variable(tf.ones_initializer()(shape=[input_dims]),
                        dtype=tf.float32,
                        trainable=True,
                        name='gamma')
    batch_mean, batch_var = tf.nn.moments(x, moment_dims, name='moments')

    decay = bn_decay if bn_decay is not None else 0.9
    ema = tf.train.ExponentialMovingAverage(decay=decay)
    # Operator that maintains moving averages of variables.
    ema_apply_op = tf.cond(is_training,
                           lambda: ema.apply([batch_mean, batch_var]),
                           lambda: tf.no_op())

    # Update moving average and return current batch's avg and var.
    def mean_var_with_update():
        with tf.control_dependencies([ema_apply_op]):
            return tf.identity(batch_mean), tf.identity(batch_var)

    # ema.average returns the Variable holding the average of var.
    mean, var = tf.cond(
        is_training, mean_var_with_update, lambda:
        (ema.average(batch_mean), ema.average(batch_var)))
    x = tf.nn.batch_normalization(x, mean, var, beta, gamma, 1e-3)
    return x
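A minimal sketch of calling this batch_norm helper in graph mode; the feature size, batch size, and feed values are illustrative:

import numpy as np

x = tf.placeholder(tf.float32, [None, 64])
is_training = tf.placeholder(tf.bool, shape=())
y = batch_norm(x, is_training, bn_decay=0.9)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch = np.random.rand(16, 64).astype(np.float32)
    out_train = sess.run(y, feed_dict={x: batch, is_training: True})
    out_eval = sess.run(y, feed_dict={x: batch, is_training: False})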
Code example #12
File: optimization.py Project: renogomez/BERT-NER-TF
 def _decay_weights_op(self, var, learning_rate, apply_state):
     do_decay = self._do_use_weight_decay(var.name)
     if do_decay:
         return var.assign_sub(learning_rate * var *
                               apply_state['weight_decay_rate'],
                               use_locking=self._use_locking)
     return tf.no_op()
Code example #13
File: estimator.py Project: tallamjr/google-research
 def summary(images, name):
     """As a hack, saves image summaries by adding to `eval_metric_ops`."""
     images = tf.saturate_cast(images * 255 + 0.5, tf.uint8)
     eval_metric_ops[name] = (tf.summary.image(name,
                                               images,
                                               max_outputs=2),
                              tf.no_op())
Code example #14
    def _set_up_cache(self):
        """Replace fields with cached versions.

    Returns:
      TensorFlow op to update the cache.
    """
        return tf.no_op()  # By default, don't cache.
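The base class here deliberately caches nothing. A rough sketch of how a subclass might override it to cache one field; the _kernel attribute and variable name are hypothetical:

    def _set_up_cache(self):
        """Caches self._kernel in a non-trainable variable (illustrative)."""
        self._cached_kernel = tf.Variable(
            tf.zeros_like(self._kernel), trainable=False, name='cached_kernel')
        update_op = self._cached_kernel.assign(self._kernel)
        self._kernel = self._cached_kernel
        return update_op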
Code example #15
 def testReuseVars(self):
   height, width = 3, 3
   with self.test_session() as sess:
     image_shape = (10, height, width, 3)
     image_values = np.random.rand(*image_shape)
     expected_mean = np.mean(image_values, axis=(0, 1, 2))
     expected_var = np.var(image_values, axis=(0, 1, 2))
     images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
     output = ops.batch_norm(images, decay=0.1, is_training=False)
     update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
     with tf.control_dependencies(update_ops):
       barrier = tf.no_op(name='gradient_barrier')
       output = control_flow_ops.with_dependencies([barrier], output)
     # Initialize all variables
     sess.run(tf.global_variables_initializer())
     moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
     moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
     mean, variance = sess.run([moving_mean, moving_variance])
     # After initialization moving_mean == 0 and moving_variance == 1.
     self.assertAllClose(mean, [0] * 3)
     self.assertAllClose(variance, [1] * 3)
     # Simulate assignment from saver restore.
     init_assigns = [tf.assign(moving_mean, expected_mean),
                     tf.assign(moving_variance, expected_var)]
     sess.run(init_assigns)
     for _ in range(10):
       sess.run([output], {images: np.random.rand(*image_shape)})
     mean = moving_mean.eval()
     variance = moving_variance.eval()
     # Although we feed different images, the moving_mean and moving_variance
     # shouldn't change.
     self.assertAllClose(mean, expected_mean)
     self.assertAllClose(variance, expected_var)
Code example #16
 def testComputeMovingVars(self):
   height, width = 3, 3
   with self.test_session() as sess:
     image_shape = (10, height, width, 3)
     image_values = np.random.rand(*image_shape)
     expected_mean = np.mean(image_values, axis=(0, 1, 2))
     expected_var = np.var(image_values, axis=(0, 1, 2))
     images = tf.constant(image_values, shape=image_shape, dtype=tf.float32)
     output = ops.batch_norm(images, decay=0.1)
     update_ops = tf.get_collection(ops.UPDATE_OPS_COLLECTION)
     with tf.control_dependencies(update_ops):
       barrier = tf.no_op(name='gradient_barrier')
       output = control_flow_ops.with_dependencies([barrier], output)
     # Initialize all variables
     sess.run(tf.global_variables_initializer())
     moving_mean = variables.get_variables('BatchNorm/moving_mean')[0]
     moving_variance = variables.get_variables('BatchNorm/moving_variance')[0]
     mean, variance = sess.run([moving_mean, moving_variance])
     # After initialization moving_mean == 0 and moving_variance == 1.
     self.assertAllClose(mean, [0] * 3)
     self.assertAllClose(variance, [1] * 3)
     for _ in range(10):
       sess.run([output])
     mean = moving_mean.eval()
     variance = moving_variance.eval()
     # After 10 updates with decay 0.1 moving_mean == expected_mean and
     # moving_variance == expected_var.
     self.assertAllClose(mean, expected_mean)
     self.assertAllClose(variance, expected_var)
Code example #17
File: facenet.py Project: thaiph99/facenet
def train(total_loss,
          global_step,
          optimizer,
          learning_rate,
          moving_average_decay,
          update_gradient_vars,
          log_histograms=True):
    # Generate moving averages of all losses and associated summaries.
    loss_averages_op = _add_loss_summaries(total_loss)

    # Compute gradients.
    with tf.control_dependencies([loss_averages_op]):
        if optimizer == 'ADAGRAD':
            opt = tf.train.AdagradOptimizer(learning_rate)
        elif optimizer == 'ADADELTA':
            opt = tf.train.AdadeltaOptimizer(learning_rate,
                                             rho=0.9,
                                             epsilon=1e-6)
        elif optimizer == 'ADAM':
            opt = tf.train.AdamOptimizer(learning_rate,
                                         beta1=0.9,
                                         beta2=0.999,
                                         epsilon=0.1)
        elif optimizer == 'RMSPROP':
            opt = tf.train.RMSPropOptimizer(learning_rate,
                                            decay=0.9,
                                            momentum=0.9,
                                            epsilon=1.0)
        elif optimizer == 'MOM':
            opt = tf.train.MomentumOptimizer(learning_rate,
                                             0.9,
                                             use_nesterov=True)
        else:
            raise ValueError('Invalid optimization algorithm')

        grads = opt.compute_gradients(total_loss, update_gradient_vars)

    # Apply gradients.
    apply_gradient_op = opt.apply_gradients(grads, global_step=global_step)

    # Add histograms for trainable variables.
    if log_histograms:
        for var in tf.trainable_variables():
            tf.summary.histogram(var.op.name, var)

    # Add histograms for gradients.
    if log_histograms:
        for grad, var in grads:
            if grad is not None:
                tf.summary.histogram(var.op.name + '/gradients', grad)

    # Track the moving averages of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(
        moving_average_decay, global_step)
    variables_averages_op = variable_averages.apply(tf.trainable_variables())

    with tf.control_dependencies([apply_gradient_op, variables_averages_op]):
        train_op = tf.no_op(name='train')

    return train_op
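A minimal sketch of calling this train() helper; the toy loss, hyper-parameters, and variable list below are placeholders rather than values from the facenet project (which also supplies _add_loss_summaries):

global_step = tf.Variable(0, trainable=False, name='global_step')
w = tf.Variable(tf.random_normal([10, 1]), name='w')
total_loss = tf.reduce_mean(tf.square(tf.matmul(tf.random_normal([32, 10]), w)))
tf.add_to_collection('losses', total_loss)

train_op = train(total_loss, global_step,
                 optimizer='ADAM', learning_rate=0.001,
                 moving_average_decay=0.9999,
                 update_gradient_vars=tf.trainable_variables())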
Code example #18
def construct_model(input_tensors, encoder_w0, decoder0, prefix=None):
    """Construct model."""
    facto = tf.placeholder_with_default(1.0, ())
    context_xs = input_tensors['inputa']
    context_ys = input_tensors['labela']
    target_xs = input_tensors['inputb']
    target_ys = input_tensors['labelb']

    # sample ws ~ w|(x_all,a), rs = T(ws, ys), r = mean(rs), z = T(r)
    # x_all = tf.concat([context_xs, target_xs], axis=1) #n_task * 20 * (128*128)
    # y_all = tf.concat([context_ys, target_ys], axis=1)

    x_all = context_xs
    y_all = context_ys

    # n_task * [n_im] * d_z
    if 'train' in prefix:
        z_samples = xy_to_z(x_all, y_all, encoder_w0) * facto
    else:
        z_samples = xy_to_z(context_xs, context_ys, encoder_w0) * facto

    target_ws = encoder_w(target_xs, encoder_w0)
    input_zxs = tf.concat([z_samples, target_ws], axis=-1)

    # sample y_hat ~  y|(w,z)
    with tf.variable_scope('decoder'):
        target_yhat_mu = decoder0(input_zxs)  # n_task * n_im * dim_y

    # when the variance of p(y | x, z) is fixed, the negative log-likelihood is equivalent to MSE
    mse_loss = mse(target_yhat_mu, target_ys)

    tf.summary.scalar(prefix + 'mse', mse_loss)
    optimizer1 = tf.train.AdamOptimizer(FLAGS.update_lr)
    optimizer2 = tf.train.AdamOptimizer(FLAGS.update_lr)

    if 'train' in prefix:
        if FLAGS.weight_decay:
            loss = mse_loss
            optimizer = contrib_opt.AdamWOptimizer(
                weight_decay=FLAGS.beta, learning_rate=FLAGS.update_lr)
            gvs = optimizer.compute_gradients(loss)
            train_op = optimizer.apply_gradients(gvs)
        else:
            THETA = (  # pylint: disable=invalid-name
                tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                  scope='decoder') +
                tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES,
                                  scope='encoder_w'))
            all_var = tf.trainable_variables()
            PHI = [v for v in all_var if v not in THETA]  # pylint: disable=invalid-name
            loss = mse_loss
            gvs_theta = optimizer1.compute_gradients(loss, THETA)
            train_theta_op = optimizer1.apply_gradients(gvs_theta)
            gvs_phi = optimizer2.compute_gradients(loss, PHI)
            train_phi_op = optimizer2.apply_gradients(gvs_phi)
            with tf.control_dependencies([train_theta_op, train_phi_op]):
                train_op = tf.no_op()
        return mse_loss, train_op, facto
    else:
        return mse_loss
Code example #19
    def Lock(self):
        """Used to lock in the personalization."""
        lock_ops = [tf.no_op()]  # Does nothing; used only as a placeholder for dependency control.

        if self.lowrank_adaptation:
            # compute the new W
            left_adapt = tf.squeeze(self.left_adapt)
            right_adapt = tf.squeeze(self.right_adapt)
            # final_w = self.W + tf.matmul(left_adapt, right_adapt)
            final_w = tf.add(tf.matmul(left_adapt, right_adapt),self.W)
            #self.lockedW = final_w
            #lock_ops.append(self.lockedW)
            # self.lockedW.assign(final_w)
            # lock = tf.assign(self.lockedW, final_w)  # tf.identity can be used to dereference the _ref type
            self.lockedW = final_w
            lock2 = tf.identity(self.lockedW)
            lock_ops.append(lock2) #
        else:
            # self.lockedW = self.W
            # lock_ops.append(self.lockedW)
            lock3 = tf.assign(self.lockedW, self.W)
            lock4 = tf.identity(self.lockedW)
            lock_ops.append(lock4)

            # lock_ops.append(self.lockedW.assign(self.W))

        if self.mikolov_adapt:
            final_bias = tf.squeeze(self.bias + self.delta)
            lock_ops.append(self.lockedBias.assign(final_bias))
        else:
            lock_ops.append(self.lockedBias.assign(self.bias))

        # self.lock_op = tf.group(*lock_ops, name="lock_op")  # has no return value; it is just an op
        self.lock_op = tf.tuple(lock_ops, name="lock_op")  # returns a list of tensors
Code example #20
File: lstm_models.py Project: wangeen/magenta
  def reconstruction_loss(self, x_input, x_target, x_length, z=None,
                          c_input=None):
    # Split output for each core model.
    split_x_input = tf.split(x_input, self._output_depths, axis=-1)
    split_x_target = tf.split(x_target, self._output_depths, axis=-1)
    loss_outputs = []

    # Compute reconstruction losses for the split output.
    for i, cd in enumerate(self._core_decoders):
      with tf.variable_scope('core_decoder_%d' % i):
        # TODO(adarob): Sample initial inputs when using scheduled sampling.
        loss_outputs.append(
            cd.reconstruction_loss(
                split_x_input[i], split_x_target[i], x_length, z, c_input))

    r_losses, metric_maps, decode_results = list(zip(*loss_outputs))

    # Merge the metric maps by passing through renamed values and taking the
    # mean across the splits.
    merged_metric_map = {}
    for metric_name in metric_maps[0]:
      metric_values = []
      for i, m in enumerate(metric_maps):
        merged_metric_map['%s/output_%d' % (metric_name, i)] = m[metric_name]
        metric_values.append(m[metric_name][0])
      merged_metric_map[metric_name] = (
          tf.reduce_mean(metric_values), tf.no_op())

    return (tf.reduce_sum(r_losses, axis=0),
            merged_metric_map,
            self._merge_decode_results(decode_results))
Code example #21
 def decompress_blocks(self, sess, blocks, x_shape, debug=False):
     """Uses the decompression model to decompress a point cloud"""
     dec_blocks = []
     debug_t_list = []
     for i, (strings, best_threshold_idx) in enumerate(blocks):
         logger.info(f'Decompress block {i}/{len(blocks)}: start')
         strings = [[s] for s in strings]
         threshold = self.thresholds[best_threshold_idx]
         logger.info(f'Decompress block {i}/{len(blocks)}: run session')
         fetches = [self.x_hat, self.debug_tensors if debug else tf.no_op()]
         x_hat, debug_tensors = sess.run(
             fetches,
             feed_dict={
                 self.x_shape_t: x_shape,
                 **dict(zip(self.strings_t, strings))
             })
         logger.info(f'Decompress block {i}/{len(blocks)}: session done')
         if self.data_format == 'channels_first':
             x_hat = x_hat[0, 0, :, :, :]
         else:
             x_hat = x_hat[0, :, :, :, 0]
         x_hat = x_hat > threshold
         pa = np.argwhere(x_hat).astype('float32')
         logger.info(f'Decompress block {i}/{len(blocks)}: done')
         dec_blocks.append(pa)
         debug_t_list.append(debug_tensors)
     return dec_blocks, debug_t_list
Code example #22
def assert_shape_equal_along_first_dimension(shape_a, shape_b):
    """Asserts that shape_a and shape_b are the same along the 0th-dimension.

  If the shapes are static, raises a ValueError when the shapes
  mismatch.

  If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
  mismatch.

  Args:
    shape_a: a list containing shape of the first tensor.
    shape_b: a list containing shape of the second tensor.

  Returns:
    Either a tf.no_op() when shapes are all static or a tf.assert_equal() op
    when the shapes are dynamic.

  Raises:
    ValueError: When shapes are both static and unequal.
  """
    if isinstance(shape_a[0], int) and isinstance(shape_b[0], int):
        if shape_a[0] != shape_b[0]:
            raise ValueError('Unequal first dimension {}, {}'.format(
                shape_a[0], shape_b[0]))
        else:
            return tf.no_op()
    else:
        return tf.assert_equal(shape_a[0], shape_b[0])
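A minimal sketch of how such a shape assertion is typically consumed, gating a computation on the (possibly dynamic) check through control dependencies; the tensors are illustrative:

boxes = tf.placeholder(tf.float32, [None, 4])
scores = tf.placeholder(tf.float32, [None])

# Unknown leading dimensions show up as scalar tensors, so the dynamic branch
# (tf.assert_equal) is taken here.
shape_a = [tf.shape(boxes)[0], 4]
shape_b = [tf.shape(scores)[0]]
assert_op = assert_shape_equal_along_first_dimension(shape_a, shape_b)

with tf.control_dependencies([assert_op]):
    weighted_boxes = boxes * tf.expand_dims(scores, axis=1)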
Code example #23
    def __run_test(self, sess):
        # Var is initialized by var_init
        var = tf.get_variable('var', [1], dtype=tf.int32)
        var_init = tf.constant([0])
        var_initialize = var.assign(var_init)

        # Computation of mean
        input1 = tf.constant(
            [[[1.0, 2.0], [3.0, 4.0]], [[1.0, 2.0], [3.0, 4.0]]], name='input1')
        mean = tf.reduce_mean(input1, var)

        # For updating the Var
        const_var = tf.constant([1])
        var_add = tf.add(var, const_var)
        var_update = var.assign(var_add)

        # update to happen after mean computation
        with tf.control_dependencies([mean]):
            var_update = var.assign(var_add)

        with tf.control_dependencies([var_update]):
            update_op = tf.no_op('train_op')

        # Initialize Var
        var_init_value = sess.run((var_initialize))

        # Compute mean and var updates
        mean_values = []
        for i in range(3):
            (result_mean, result_up) = sess.run((mean, update_op))
            mean_values.append(result_mean)

        # Compute Final Values
        var_final_val = var.eval(sess)
        return var_init_value, mean_values, var_final_val
Code example #24
  def _compressor_op(self, matrix_compressor, a_matrix_tfvar):
    """Creates compressor op based on simhash matrix_compressor.

    Meant to create the factors once at begin_compression_step tailored
    for simhash (which has only one output b_matrix).

    Args:
      matrix_compressor: specifies the matrix compressor object.
      a_matrix_tfvar: the tf tensor to be compressed.

    Returns:
      a tf.no_op object with assign ops as control dependencies.
    """
    # py_func is not supported on TPU, so a non-py_func implementation is needed.
    # The following line seems to be required; otherwise TensorFlow reports that
    # machines with TPU don't support py_func.
    use_tpu = self._spec.use_tpu

    if use_tpu:
      [b_matrix_out] = matrix_compressor.tpu_matrix_compressor(a_matrix_tfvar)
    else:
      [b_matrix_out
      ] = tf.compat.v1.py_func(matrix_compressor.static_matrix_compressor,
                               [a_matrix_tfvar], [tf.float32])

    b_matrix_assign_op = tf.compat.v1.assign(
        self.b_matrix_tfvar, b_matrix_out, name='_b_matrix_assign_op')
    with tf.control_dependencies([b_matrix_assign_op]):
      return tf.no_op('compresor_b_matrix_update')
Code example #25
def assert_shape_equal(shape_a, shape_b):
    """Asserts that shape_a and shape_b are equal.

  If the shapes are static, raises a ValueError when the shapes
  mismatch.

  If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
  mismatch.

  Args:
    shape_a: a list containing shape of the first tensor.
    shape_b: a list containing shape of the second tensor.

  Returns:
    Either a tf.no_op() when shapes are all static or a tf.assert_equal() op
    when the shapes are dynamic.

  Raises:
    ValueError: When shapes are both static and unequal.
  """
    if (all(isinstance(dim, int) for dim in shape_a)
            and all(isinstance(dim, int) for dim in shape_b)):
        if shape_a != shape_b:
            raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
        else:
            return tf.no_op()
    else:
        return tf.assert_equal(shape_a, shape_b)
Code example #26
File: model.py Project: kengchikengchi/compression
  def _train_discriminator(self, nodes: Nodes, optimizer, create_summaries):
    """Creates a train_op for the discriminator.

    Args:
      nodes: Instance of Nodes, the nodes of the model to feed to D.
      optimizer: Discriminator optimizer. Passed in because it will be re-used
        in the different discriminator steps.
      create_summaries: If True, create summaries.

    Returns:
      A training op if training, else no_op.
    """
    d_out = self._compute_discriminator_out(
        nodes,
        create_summaries,
        gradients_to_generator=False)  # Only train discriminator!
    d_loss = self._create_gan_loss(d_out, create_summaries, mode="d_loss")

    if not self.training:
      return tf.no_op()

    self._add_hook(tf.train.NanTensorHook(d_loss))

    # Getting the variables here because they don't exist before calling
    # _compute_discriminator_out for the first time!
    disc_vars = self._discriminator.trainable_variables

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
      with tf.name_scope("min_d"):
        train_op_d = optimizer.minimize(
            d_loss, self._global_step_disc, disc_vars)
        return train_op_d
Code example #27
    def _compressor_op(self, matrix_compressor, a_matrix_tfvar):
        """Creates compressor op based on matrix_compressor.

    Meant to create the factors once at begin_compression_step.

    Args:
      matrix_compressor: specifies the matrix compressor object.
      a_matrix_tfvar: the tf tensor to be compressed.

    Returns:
      a tf.no_op object with assign ops as control dependencies.
    """
        # py_func is not supported on TPU, so a non-py_func implementation is needed.
        use_tpu = self._spec.use_tpu
        # A tf.py_func error has been observed, which is why the explicit
        # TF operation updates below may be needed.
        if use_tpu:
            [b_matrix_out, c_matrix_out
             ] = matrix_compressor.tpu_matrix_compressor(a_matrix_tfvar)
        else:
            [b_matrix_out, c_matrix_out] = tf.compat.v1.py_func(
                matrix_compressor.static_matrix_compressor, [a_matrix_tfvar],
                [tf.float32, tf.float32])

        b_matrix_assign_op = tf.compat.v1.assign(self.b_matrix_tfvar,
                                                 b_matrix_out,
                                                 name='b_matrix_assign_op')
        c_matrix_assign_op = tf.compat.v1.assign(self.c_matrix_tfvar,
                                                 c_matrix_out,
                                                 name='c_matrix_assign_op')
        with tf.control_dependencies([b_matrix_assign_op, c_matrix_assign_op]):
            logging.info('Updating b_matrix,c_matrix.')
            return tf.no_op('compresor_b_matrix_and_c_matrix_update')
Code example #28
    def _apply_dense(self, grad, var):
        # We actually apply grads in _finish. This function is used
        # to record intermediate variables related to the individual gradients
        # which we eventually combine in _finish to obtain global statistics
        # (e.g. the L1 norm of the full gradient).

        self.grads[var] = grad

        betting_fraction = self.get_slot(var, OUTER_BETTING_FRACTION)
        self.betting_fraction_dot_product_deltas[var] = tf.reduce_sum(
            betting_fraction * grad)

        # Wealth increases by -g \cdot w where w is the parameter value.
        # Since w = Wealth * v with betting fraction v, we can write
        # the wealth increment as -(g \cdot v) Wealth.
        # TODO(cutkosky): at one point there was a bug in which epsilon
        # was not added here. It seemed performance may have degraded
        # somewhat after fixing this. Find out why this would be.
        wealth_delta = -self.betting_fraction_dot_product_deltas[
            var] * self._get_non_slot(OUTER_WEALTH)
        self.wealth_deltas[var] = wealth_delta

        self.grad_norms[var] = tf.norm(grad, 1)

        return tf.no_op()
Code example #29
 def compute_gradients(self, loss):
     grads = self.opt.compute_gradients(loss, self.var_list)
     updates = [self.accum_vars[v].assign_add(g) for (g, v) in grads]
     updates.append(self.total_loss.assign_add(loss))
     updates.append(self.count_loss.assign_add(1.0))
     with tf.control_dependencies(updates):
         return tf.no_op()
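The method above only accumulates; a rough sketch of the companion apply-and-reset step that such a gradient-accumulation wrapper usually provides, assuming self.accum_vars maps each variable in self.var_list to a same-shaped accumulator (the method name is hypothetical):

 def apply_accumulated_gradients(self):
     # Average the accumulated gradients, apply them, then zero the
     # accumulators and the loss counters.
     grads_and_vars = [(self.accum_vars[v] / self.count_loss, v)
                       for v in self.var_list]
     apply_op = self.opt.apply_gradients(grads_and_vars)
     with tf.control_dependencies([apply_op]):
         reset_ops = [self.accum_vars[v].assign(tf.zeros_like(v))
                      for v in self.var_list]
         reset_ops.append(self.total_loss.assign(0.0))
         reset_ops.append(self.count_loss.assign(0.0))
         return tf.group(*reset_ops)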
Code example #30
 def all_update_op(self):
   """Returns the combine update tf OP."""
   # TODO(nishanthd): implement all_update_op logic inside the wrapper
   with tf.compat.v1.name_scope(self._scope):
     with tf.control_dependencies(self._update_ops):
       logging.info('Updating all compression_ops.')
       self._all_update_op = tf.no_op('update_all_compression_ops')
   return self._all_update_op