Example #1
    def _train_step(self, X, Y, meta1):

        try:

            model = self.model
            optimizer = self.optimizer

            # Step 1 - Calculate loss and gradients
            with tf.GradientTape() as tape:
                t2 = tf.timestamp()
                y_predict = model(X, training=True)
                t2_ = tf.timestamp()

                loss_vals = self._train_loss(Y, y_predict, meta1)

            # tf.print(' - loss_Vals: ', loss_vals)
            t3 = tf.timestamp()
            all_vars = model.trainable_variables
            gradients = tape.gradient(loss_vals, all_vars)  # dL/dW
            optimizer.apply_gradients(zip(gradients, all_vars))
            t3_ = tf.timestamp()

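            # The three returned deltas are the forward-pass, loss-computation, and backward/apply times (seconds).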
            return t2_ - t2, t3 - t2_, t3_ - t3

        except Exception:
            traceback.print_exc()
            return None, None, None
Example #2
    def f(params_1d):
        """A function that can be used by tfp.optimizer.lbfgs_minimize.
        This function is created by function_factory.
        Args:
           params_1d [in]: a 1D tf.Tensor.
        Returns:
            A scalar loss and the gradients w.r.t. the `params_1d`.
        """
        # use GradientTape so that we can calculate the gradient of loss w.r.t. parameters
        with tf.GradientTape() as tape:
            # update the parameters in the model
            assign_new_model_parameters(params_1d)
            # calculate the loss
            loss_value = loss()

        # calculate gradients and convert to 1D tf.Tensor
        grads = tape.gradient(loss_value, model.trainable_variables)
        grads = tf.dynamic_stitch(idx, grads)

        # print out iteration & loss
        f.iter.assign_add(1)

        if f.iter % 300 == 0:
            elapsed = tf.timestamp() - f.start_time

            tf.print("Iter:", f.iter // 3, "loss:", loss_value, "time:",
                     elapsed)
            f.start_time.assign(tf.timestamp())

        # store loss value so we can retrieve later
        tf.py_function(f.history.append, inp=[loss_value], Tout=[])

        return loss_value, grads
Example #3
 def optimize_body(it, last_time):
     loss = loss_fn()
     (gradient,
      variable), = optimizer.compute_gradients(loss, var_list=[images])
     if blur:
         blur_diff = blur(images) - images
         to_blur = tf.equal(it % blur_each, 0)
         gradient = tf.cond(
             to_blur,
             # gradient is subtracted so blur_diff must be negative
             lambda: gradient - blur_diff / learning_rate,
             lambda: gradient)
     optimization = optimizer.apply_gradients(
         [(gradient, variable)],
         global_step=tf.train.get_or_create_global_step())
     # image display-time computations
     time_delta = tf.timestamp() - last_time
     was_long_enough = tf.greater(time_delta, 1 / show_rate)
     show_image_op = tf.cond(
         was_long_enough,
         lambda: tf.py_func(show_image_wrapper, [images], tf.float32),
         lambda: tf.constant(0, dtype=tf.float32))
     with tf.control_dependencies([optimization]):
         it = it + 1
         if show_img:
             with tf.control_dependencies([show_image_op]):
                 new_time = tf.cond(
                     was_long_enough,
                     lambda: tf.timestamp(),  # if showed image, then update
                     lambda: last_time)  # else leave last_time as was
                 return it, new_time
         else:
             return it, last_time
Example #4
def RC(m, k, n, kern_para_a, kern_para_b, num_devices, a_shards, b_shards):
    c = [None] * kern_para_a
    c_final = [None] * kern_para_a
    c_final_dist = [None] * kern_para_a
    for i in range(kern_para_a):
        c[i] = [None] * kern_para_b
        c_final_dist[i] = [None] * kern_para_b
        for j in range(kern_para_b):
            gid = i * kern_para_b + j
            with tf.device('/device:gpu:{}'.format(gid % num_devices)):
                c[i][j] = tf.matmul(a_shards[i][j], b_shards[i][j])
                # tf.print(c[i][j].device)

        with tf.device('/device:gpu:{}'.format(
            (i * kern_para_b) % num_devices)):
            c_final[i] = tf.concat(c[i], axis=1)

        for j in range(kern_para_b):
            gid = i * kern_para_b + j
            with tf.device('/device:gpu:{}'.format(gid % num_devices)):
                #c_final_dist[i][j] = c_final[i]
                a_shards[i][j].assign(c_final[i])
                ret_val = tf.constant(i + 1)
    if ret_val > 0:
        tf.print("Time taken:", tf.timestamp())
        # tot_time = tf.timestamp() - start
    else:
        tf.print("Time taken:", tf.timestamp())

    return c_final_dist
Example #5
    def GetLoss(**params):
        tf.random.set_seed(args.seed)
        model = pm.HHModel(var_pos, cfg_mean_std, cfg_min_max, params)
        model.call(X[0:1, :, :])
        opt = getattr(tf.keras.optimizers, params['optimizers'])(
            learning_rate=10**params['learning_rate_exp'])

        model.compile(loss='binary_crossentropy',
                      optimizer=opt,
                      weighted_metrics=[pm.sel_acc_2])
        model.build(X.shape)
        print(tf.timestamp(name='timestamp'))
        total_evt = X.shape[0]
        evt_training = int(total_evt * (1 - args.val_split))
        evt_val = total_evt - evt_training
        print('Events to train: ', evt_training,
              'Events to validate during training: ', evt_val)
        history = model.fit(X,
                            Y,
                            validation_split=args.val_split,
                            epochs=args.n_epochs,
                            batch_size=params['batch_size'],
                            verbose=0)
        print(tf.timestamp(name='timestamp'))
        tf.keras.backend.clear_session()

        return np.amax(history.history['val_sel_acc_2'])
Example #6
def log_deferred(op, log_id, every_n=1, first_n=None):
    """Helper method inserting compliance logging ops.

  Note: This helper is not guaranteed to be efficient, as it will insert ops
        and control dependencies. If this proves to be a bottleneck, submitters
        may wish to consider other methods such as extracting values from an
        .events file.

  Args:
    op: A tf op to be printed.
    log_id: a uuid provided by the logger in mlperf_log.py
    every_n: If repeat is True, with what frequency should the input op be
             logged. If repeat is False, this argument is ignored.
    first_n: Only log this many values. This arg does not interact with every_n.
             The first_n refers to the first n that would have been logged.
  """

    prefix = ":::MLPv0.5.0 [{}]".format(log_id)
    if first_n is not None and first_n == 1:
        return tf.compat.v1.Print(op, [tf.timestamp(), op],
                                  message=prefix,
                                  first_n=1)

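    # The counter starts at -1 so the first assign_add yields 0 and the op is logged on the first call.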
    counter = tf.Variable(tf.zeros(shape=(), dtype=tf.int32) - 1,
                          aggregation=tf.VariableAggregation.MEAN)
    increment = tf.compat.v1.assign_add(counter, 1, use_locking=True)
    return tf.cond(
        pred=tf.equal(tf.math.mod(increment, every_n), 0),
        true_fn=lambda: tf.compat.v1.Print(
            op, [tf.timestamp(), op], message=prefix, first_n=first_n),
        false_fn=lambda: op)
Example #7
  def __init__(self,
               nq_server: server.NQServer,
               state: Optional[types.EnvState] = None,
               random_state: Optional[np.random.RandomState] = None,
               training: bool = True,
               stop_after_seeing_new_results: bool = False):
    super().__init__()
    self.nq_server = nq_server
    self.training = training

    self.first_time = True  # Used for initial debug logging

    self.stop_after_seeing_new_results = stop_after_seeing_new_results

    self.descriptor = get_descriptor()
    self.grammar = self.descriptor.extras['grammar']
    self.tokenizer = self.descriptor.extras['tokenizer']
    self.action_space = len(self.grammar.productions())

    self.idf_lookup = utils.IDFLookup.get_instance(
        path=common_flags.IDF_LOOKUP_PATH.value)

    trie_start_time = tf.timestamp()
    if common_flags.GLOBAL_TRIE_PATH.value is None:
      self.global_trie = pygtrie.Trie.fromkeys((x for x in map(
          functools.partial(
              to_action_tuple, grammar=self.grammar, tokenizer=self.tokenizer),
          self.idf_lookup.lookup) if x))
      self._logging_info('Built trie of size %s in %s s', len(self.global_trie),
                         tf.timestamp() - trie_start_time)
    else:
      with tf.io.gfile.GFile(common_flags.GLOBAL_TRIE_PATH.value,
                             'rb') as trie_f:
        self.global_trie = pickle.load(trie_f)
      self._logging_info('Restored trie of size %s in %s s',
                         len(self.global_trie),
                         tf.timestamp() - trie_start_time)

    # The value of the global steps in the learner is updated in step()
    self.training_steps = 0

    # Trie for the current results.  We only build this the first time after
    # a new set of results is obtained.  A value of `None` indicates that for
    # the current set of results, it has not been built yet.
    self.known_word_tries = None  # type: Optional[state_tree.KnownWordTries]
    self.valid_word_actions = None  # type: Optional[state_tree.ValidWordActions]
    self.use_rf_restrict = False

    self.state = state
    if state and state.tree is None:
      self.state.tree = state_tree.NQStateTree(grammar=self.grammar)

    self.bert_config: configs.BertConfig = self.descriptor.extras['bert_config']
    self.sequence_length: int = self.descriptor.extras['sequence_length']
    self.action_history = []
    self.n_episode = 0

    self._rand = np.random.RandomState()
    if random_state:
      self._rand.set_state(random_state)
Example #8
 def _execute(self, dataset):
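     # tf.timestamp() returns wall-clock seconds since the Unix epoch as a float64 scalar.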
     start = tf.timestamp()
     pred = self._model.predict(dataset)
     end = tf.timestamp()
     time = tf.math.subtract(end, start)
     pred = tf.image.convert_image_dtype(pred, dtype=tf.uint8, saturate=True)
     return pred.numpy(), time.numpy()
Example #9
 def valid():
     epoch_loss = []
     epoch_metrics = defaultdict(list)
     start_time = tf.timestamp()
     last_it = 0.
     for it, inputs in enumerate(valid_ds.repeat(1)):
         it = tf.cast(it, tf.float32)
         _loss, _metrics = step(it, inputs, training=False)
         assert isinstance(_metrics, dict), \
           "Metrics must be instance of dictionary"
         # store for calculating average
         epoch_loss.append(_loss)
         for k, v in _metrics.items():
             epoch_metrics[k].append(v)
         # print log
         end_time = tf.timestamp()
         if end_time - start_time >= logging_interval:
             it_per_sec = tf.cast(
                 (it - last_it) /
                 tf.cast(end_time - start_time, tf.float32), tf.int32)
             tf.print(" ",
                      log_tag,
                      "[Valid] #",
                      it + 1,
                      " ",
                      it_per_sec,
                      "(it/s)",
                      sep="",
                      output_stream=output_stream)
             start_time = tf.timestamp()
             last_it = it
     self.valid_loss_epoch.append(epoch_loss)
     self.valid_metrics_epoch.append(epoch_metrics)
     return tf.reduce_mean(epoch_loss, axis=0), \
       {k: tf.reduce_mean(v, axis=0) for k, v in epoch_metrics.items()}
Example #10
File: run_tf2.py  Project: Lummetry/DS102
def graph_train_and_validate(model, train_data, test_data, epochs, batch_size):
    x_trn, y_trn = train_data
    x_tst, y_tst = test_data
    n_obs = x_trn.shape[0]

    trn_ds = tf.data.Dataset.from_tensor_slices((x_trn, y_trn))
    trn_shuffled_ds = trn_ds.shuffle(buffer_size=n_obs)
    trn_batched_ds = trn_shuffled_ds.batch(batch_size)
    trn_ready_ds = trn_batched_ds.prefetch(1)
    tf.print(epochs, " epochs, batch-size ", batch_size, " ds: ", trn_ready_ds)
    loss_func = tf.keras.losses.get(model.loss)
    opt = tf.keras.optimizers.get(model.optimizer)

    t_start = tf.timestamp()
    for epoch in range(epochs):
        epoch_loss = 0.
        updates = 0
        for i, (tf_x_batch, tf_y_batch) in enumerate(trn_ready_ds):
            with tf.GradientTape() as tape:
                tf_yhat = model(tf_x_batch)
                tf_loss = loss_func(tf_y_batch, tf_yhat)
            lst_grads = tape.gradient(tf_loss, model.trainable_weights)
            opt.apply_gradients(zip(lst_grads, model.trainable_weights))
            epoch_loss += tf_loss
            updates += 1
    t_end = tf.timestamp()
    return t_end - t_start
Example #11
 def call(self, x, training=None):
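     # The intermediate timestamps mark per-stage wall-clock times; they are recorded but not returned.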
     t0 = tf.timestamp()
     x = self.stem(x, training=training)
     t1 = tf.timestamp()
     front = self.stages[0]
     stage1_output = front(x, training=training)
     t2 = tf.timestamp()
     transition12 = self.transitions[0]
     stage2 = self.stages[1]
     stage1_transitions = transition12([stage1_output], training=training)
     t3 = tf.timestamp()
     stage2_outputs = stage2(stage1_transitions, training=training)
     t4 = tf.timestamp()
     transition23 = self.transitions[1]
     stage3 = self.stages[2]
     stage2_transitions = transition23(stage2_outputs, training=training)
     t5 = tf.timestamp()
     stage3_outputs = stage3(stage2_transitions, training=training)
     t6 = tf.timestamp()
     transition34 = self.transitions[2]
     stage4 = self.stages[3]
     stage3_transitions = transition34(stage3_outputs, training=training)
     t7 = tf.timestamp()
     stage4_outputs = stage4(stage3_transitions, training=training)
     t8 = tf.timestamp()
     if self.include_top:
         # classification
         y = self.cls_head(stage4_outputs)
         t9 = tf.timestamp()
         return y
     else:
         return stage4_outputs
Example #12
    def fit_epoch(self, train_dataset, valid_dataset):

        for batch in train_dataset:

            t0 = tf.timestamp()
            with tf.GradientTape() as g:
                losses, _ = self.loss_and_output(batch)
                train_loss = tf.reduce_mean(losses)
            grads = g.gradient(train_loss, self.trainable_weights)
            self.opt.apply_gradients(zip(grads, self.trainable_weights))
            t1 = tf.timestamp()

            if self.step % 100 == 0:

                valid_batch = next(valid_dataset)
                valid_losses, valid_output = self.loss_and_output(valid_batch)
                valid_loss = tf.reduce_mean(valid_losses)
                valid_ppx = tf.reduce_mean(tf.math.exp(valid_losses))

                tf.print("step", self.step)
                tf.print("  train step time", t1 - t0)
                tf.print("  train_loss", train_loss)
                tf.print("  valid_loss", valid_loss)

                with self.train_writer.as_default():
                    tf.summary.scalar("loss", train_loss, step=self.step)
                with self.valid_writer.as_default():
                    tf.summary.scalar("loss", valid_loss, step=self.step)
                    tf.summary.scalar("perplexity", valid_ppx, step=self.step)

            self.step.assign_add(1)
Example #13
 def sleep_and_multiply(ordered_dict):
   init_time = tf.timestamp()
   n_iters = 0
   # This is a busy-sleep; TF exposes no direct sleep ops.
   while tf.timestamp() - init_time < sleep_time:
     n_iters += 1
   return ordered_dict['x'] * ordered_dict['y'] * tf.math.minimum(
       n_iters, 10)
Example #14
 def train_on_batch(self, batch_input):
     train_start = tf.timestamp()
     batch_output = self._forward_pass(batch_input)
     _ = self._keras_model.optimizer.get_updates(
         loss=batch_output.loss, params=self.trainable_variables)
     train_end = tf.timestamp()
     self._training_timing.log_time(train_end - train_start)
     return batch_output
Example #15
 def _execute_bicubic(self, dataset):
     for batch in dataset:
         start = tf.timestamp()
         up_size = tf.math.multiply(tf.slice(tf.shape(batch), [1], [2]), 4)
         pred = tf.image.resize(batch, size=up_size, method=tf.image.ResizeMethod.BICUBIC)
         end = tf.timestamp()
         time = tf.math.subtract(end, start)
         pred = tf.image.convert_image_dtype(pred, dtype=tf.uint8, saturate=True)
         return pred.numpy(), time.numpy()
Example #16
 def initialize(self):
   if self.seed is None:
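      # No seed was provided: derive a two-element int64 seed from the current wall-clock time.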
     return tf.cast(
         tf.stack([
             tf.math.floor(tf.timestamp() * 1e6),
             tf.math.floor(tf.math.log(tf.timestamp() * 1e6))
         ]),
         dtype=tf.int64)
   else:
     return tf.constant(self.seed, dtype=tf.int64, shape=(2,))
Example #17
 def sub_sample():
     indices = tf.repeat(tf.range(tf.shape(histogram)[0]), histogram)
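     # Build a stateless-RNG seed from two microsecond-scale timestamp reads.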
     seed = tf.cast(tf.stack([tf.timestamp() * 1e6,
                              tf.timestamp() * 1e6]),
                    dtype=tf.int64)
     samples = tf.random.stateless_uniform(tf.shape(indices), seed)
     _, sampled_idx = tf.math.top_k(samples, k=sample_num, sorted=False)
     ind = tf.expand_dims(tf.gather(indices, sampled_idx), axis=1)
     upd = tf.ones(tf.shape(sampled_idx), dtype=tf.int32)
     return tf.scatter_nd(indices=ind,
                          updates=upd,
                          shape=tf.shape(histogram))
Example #18
 def distributed_train_epoch(dataset):
     t0 = tf.timestamp()
     for one_batch in dataset:
         per_replica_loss = strategy.experimental_run_v2(
             train_step, args=(one_batch, ))
         strategy.reduce(tf.distribute.ReduceOp.SUM,
                         per_replica_loss,
                         axis=None)
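         # Per-step wall time in milliseconds, formatted to one decimal place.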
         delta_t = tf.strings.as_string((tf.timestamp() - t0) * 1000,
                                        precision=1)
         tf.print(delta_t, 'ms/step')
         t0 = tf.timestamp()
Example #19
 def discretize_tensor(x):
     seed = tf.cast(tf.stack([tf.timestamp() * 1e6,
                              tf.timestamp() * 1e6]),
                    dtype=tf.int64)
     scaled_x = tf.divide(tf.cast(x, tf.float32), step_size)
     prob_x = scaled_x - tf.cast(tf.floor(scaled_x), tf.float32)
     random_x = tf.random.stateless_uniform(x.shape,
                                            seed=seed,
                                            dtype=tf.float32)
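     # Stochastic rounding: round up with probability equal to the fractional part.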
     discretized_x = tf.where(tf.less_equal(random_x, prob_x),
                              tf.math.ceil(scaled_x),
                              tf.math.floor(scaled_x))
     return tf.cast(discretized_x, tf.int32)
Example #20
 def distinct():
     indices = tf.squeeze(
         tf.cast(tf.where(tf.not_equal(histogram, 0)), tf.int32))
     seed = tf.cast(tf.stack([tf.timestamp() * 1e6,
                              tf.timestamp() * 1e6]),
                    dtype=tf.int64)
     samples = tf.random.stateless_uniform(tf.shape(indices), seed)
     _, sampled_idx = tf.math.top_k(samples, k=sample_num, sorted=False)
     ind = tf.expand_dims(tf.gather(indices, sampled_idx), axis=1)
     upd = tf.ones(tf.shape(sampled_idx), dtype=tf.int32)
     return tf.scatter_nd(indices=ind,
                          updates=upd,
                          shape=tf.shape(histogram))
Example #21
    def f(params_1d):
        """A function that can be used by tfp.optimizer.lbfgs_minimize.
        This function is created by function_factory.
        Args:
           params_1d [in]: a 1D tf.Tensor.
        Returns:
            A scalar loss and the gradients w.r.t. the `params_1d`.
        """
        # use GradientTape so that we can calculate the gradient of loss w.r.t. parameters
        with tf.GradientTape() as tape:
            # update the parameters in the model
            assign_new_model_parameters(params_1d)
            # calculate the loss
            loss_value = loss()

        # calculate gradients and convert to 1D tf.Tensor
        grads = tape.gradient(loss_value, obj.variables)

        # Extracting the correct gradient for each set of variables
        if obj.isAdaptive:
            grads_lambdas = grads[
                dict_variables['nn_weights']:dict_variables['lambdas']]
            grads_lambdas_neg = [-x for x in grads_lambdas]
            grads[dict_variables['nn_weights']:
                  dict_variables['lambdas']] = grads_lambdas_neg

        grads = tf.dynamic_stitch(idx, grads)

        # print out iteration & loss
        f.iter.assign_add(1)

        if f.iter % 30 == 0:
            elapsed = tf.timestamp() - f.start_time

            tf.print(
                f'LBFGS iter {f.iter // 3} ->   loss:{loss_value:.2e}   time: {elapsed:.2f} seconds'
            )
            f.start_time.assign(tf.timestamp())

        # store loss value so we can retrieve later
        tf.py_function(f.history.append, inp=[loss_value], Tout=[])

        if loss_value < obj.min_loss['l-bfgs']:
            # Keep the information of the best model trained (lower loss function value)
            obj.best_model['l-bfgs'] = obj.u_model  # best model
            obj.min_loss['l-bfgs'] = loss_value.numpy()  # loss value
            obj.best_epoch['l-bfgs'] = f.iter.numpy()  # best epoch
            obj.best_diff['l-bfgs'] = obj.diffusion[0].numpy()

        return loss_value, grads
Example #22
def train(epoch, model, train_dataset, test_dataset):
    element_num = tf.data.experimental.cardinality(train_dataset)
    tf.print(model.metrics_format, output_stream=model.metrics_path)
    start = tf.timestamp()
    for i, (data, label) in train_dataset.repeat(epoch).enumerate():
        model.train_step(data, label)
        if tf.equal(tf.math.floormod(i + 1, element_num), 0):
            end = tf.timestamp()
            train_loss, train_acc, train_pre, train_rec, train_auc, train_mae, train_rmse = (
                model.metrics_loss.result(), model.metrics_acc.result(),
                model.metrics_pre.result(), model.metrics_rec.result(),
                model.metrics_auc.result(), model.metrics_mae.result(),
                model.metrics_rmse.result())
            model.resetMetrics()

            test(model, test_dataset)
            test_loss, test_acc, test_pre, test_rec, test_auc, test_mae, test_rmse = (
                model.metrics_loss.result(), model.metrics_acc.result(),
                model.metrics_pre.result(), model.metrics_rec.result(),
                model.metrics_auc.result(), model.metrics_mae.result(),
                model.metrics_rmse.result())
            model.resetMetrics()

            tf.print(tf.math.floordiv(i + 1, element_num),
                     end - start,
                     train_loss,
                     train_acc,
                     train_pre,
                     train_rec,
                     train_auc,
                     train_mae,
                     train_rmse,
                     test_loss,
                     test_acc,
                     test_pre,
                     test_rec,
                     test_auc,
                     test_mae,
                     test_rmse,
                     sep=',',
                     output_stream=model.metrics_path)

            tf.print("epoch: ", tf.math.floordiv(i + 1, element_num), "time: ",
                     end - start, "train_loss: ", train_loss, "train_acc: ",
                     train_acc, "train_pre: ", train_pre, "train_rec: ",
                     train_rec, "train_auc: ", train_auc, "train_mae: ",
                     train_mae, "train_rmse: ", train_rmse, "test_loss: ",
                     test_loss, "test_acc: ", test_acc, "test_pre: ", test_pre,
                     "test_rec: ", test_rec, "test_auc: ", test_auc,
                     "test_mae: ", test_mae, "test_rmse: ", test_rmse)
            start = tf.timestamp()
Example #23
    def __init__(
            self,
            active=True,
            scale=1.,
            print_loss=False,
            print_batch_time=False,
            return_lossval=False,
            print_time=False,  #compat, has no effect
            **kwargs):
        super(LossLayerBase, self).__init__(**kwargs)

        if print_time:
            print(
                "print_time has no effect and is only for compatibility purposes"
            )

        self.active = active
        self.scale = scale
        self.print_loss = print_loss
        self.print_batch_time = print_batch_time
        self.return_lossval = return_lossval
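        # tf.init_scope() lifts the timestamp read out of any enclosing graph-building context,
        # so the variable below is initialized with the (negated) construction-time wall clock.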
        with tf.init_scope():
            now = tf.timestamp()
        self.time = tf.Variable(-now,
                                name=self.name + '_time',
                                trainable=False)
Example #24
    def train_potentials(self):
        conf = self.conf
        while int(self.potential_step) < conf['potential_total_epochs']:
            potential_obj, reg_total = self.train_potential_step()
            elapsed_time = tf.timestamp() - self.start_time
            step_int = int(self.potential_step)

            if step_int % conf['print_frequency'] == 0:
                print(
                    'Potential Step: {} | Obj: {:4E} | Reg: {:4E} | Elapsed: {:.2f}s'
                    .format(step_int, float(potential_obj), float(reg_total),
                            float(elapsed_time)))

            if step_int % conf['log_frequency'] == 0:
                with self.summary_writer.as_default():
                    tf.summary.scalar('potential_obj',
                                      potential_obj,
                                      step=self.potential_step)

            if step_int % conf['ckpt_save_period'] == 0:
                save_path = self.potential_ckpt_manager.save()
                print('Saved checkpoint for potential step {} at {}'.format(
                    step_int, save_path))

            if step_int % conf['val_frequency'] == 0:
                self.validate_potential_training()
Example #25
    def train_transport_maps(self):
        if self.conf['moving_averages']['potential_enabled']:
            self.potential_MA.swap_in_averages()
        conf = self.conf
        while int(self.map_step) < conf['map_total_epochs']:
            map_obj = self.train_all_transport_maps_step()
            self.map_step.assign_add(1)
            elapsed_time = tf.timestamp() - self.start_time
            step_int = int(self.map_step)

            if step_int % conf['print_frequency'] == 0:
                print('Map Step: {} | Obj: {:4E} | Elapsed: {:.2f}s'.format(
                    step_int, float(map_obj), float(elapsed_time)))

            if step_int % conf['log_frequency'] == 0:
                with self.summary_writer.as_default():
                    tf.summary.scalar('map_obj', map_obj, step=self.map_step)

            if step_int % conf['ckpt_save_period'] == 0:
                save_path = self.map_ckpt_manager.save()
                print('Saved checkpoint for map step {} at {}'.format(
                    step_int, save_path))

            if step_int % conf['val_frequency'] == 0:
                self.validate_map_training()
        if self.conf['moving_averages']['potential_enabled']:
            self.potential_MA.swap_out_averages()
Example #26
    def one_round_computation(examples):
        """The TFF computation to compute the aggregated IBLT sketch."""
        if secure_sum_bitwidth is not None:
            # Use federated secure modular sum for IBLT sketches, because IBLT
            # sketches are decoded by taking modulo over the field size.
            sketch_sum_fn = secure_modular_sum
            count_sum_fn = secure_sum
        else:
            sketch_sum_fn = intrinsics.federated_sum
            count_sum_fn = intrinsics.federated_sum
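        # Capture a server-side round timestamp, truncated to whole seconds.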
        round_timestamp = intrinsics.federated_eval(
            tensorflow_computation.tf_computation(
                lambda: tf.cast(tf.timestamp(), tf.int64)), placements.SERVER)
        clients = count_sum_fn(
            intrinsics.federated_value(1, placements.CLIENTS))
        sketch, count_tensor = intrinsics.federated_map(
            compute_sketch, examples)
        sketch = sketch_sum_fn(sketch)
        count_tensor = count_sum_fn(count_tensor)

        (heavy_hitters, heavy_hitters_unique_counts, heavy_hitters_counts,
         num_not_decoded) = intrinsics.federated_map(decode_heavy_hitters,
                                                     (sketch, count_tensor))
        server_output = intrinsics.federated_zip(
            ServerOutput(
                clients=clients,
                heavy_hitters=heavy_hitters,
                heavy_hitters_unique_counts=heavy_hitters_unique_counts,
                heavy_hitters_counts=heavy_hitters_counts,
                num_not_decoded=num_not_decoded,
                round_timestamp=round_timestamp))
        return server_output
Example #27
 def test_scalar_random_seed(self):
     example_type = TensorType(tf.int32)
     sample_computation = sampling._build_sample_value_computation(
         example_type, sample_size=1)
     reservoir_type = sampling._build_reservoir_type(example_type)
     expected_type = FunctionType(parameter=collections.OrderedDict(
         reservoir=reservoir_type, sample=example_type),
                                  result=reservoir_type)
     self.assert_types_identical(sample_computation.type_signature,
                                 expected_type)
     # Get the sentinel seed so that the first call initializes based on
     # timestamp.
     reservoir = sampling._build_initial_sample_reservoir(example_type)
     self.assertAllEqual(reservoir['random_seed'],
                         [sampling.SEED_SENTINEL, sampling.SEED_SENTINEL])
     reservoir = sample_computation(reservoir, 1)
     # The first value of the seed was the timestamp, it should be greater than
     # 1_600_000_000_000 (September 2020) and within 60 seconds of now.
     self.assertGreater(reservoir['random_seed'][0], 1_600_000_000_000)
     self.assertLess(
         tf.cast(tf.timestamp() * 1000.0, tf.int64) -
         reservoir['random_seed'][0], 60)
     # The second value should be a random number. We assert it's not the
     # sentinel, though it could be with probability 1 / 2**32.
     self.assertNotEqual(reservoir['random_seed'][1],
                         sampling.SEED_SENTINEL)
Example #28
    def _log_comm_end(self):
        """
        Log communication end profiling information.

        Returns a tf.print operation that writes profiling information
        for the end of communication to a file in the chrome://tracing
        format.
        """
        profile_base_info = self._trim_last_curly_brace(
            self._get_profile_info("communication", "E"))
        # The chrome://tracing utility uses microseconds since epoch,
        # but tf.timestamp() returns seconds since epoch.  Multiply by 1e6
        # to get microseconds.
        end_timestamp = tf.timestamp() * 1e6
        duration = end_timestamp - self._start_timestamp
        return tf.print(
            profile_base_info,
            ', "ts": ',
            end_timestamp,
            ', "duration": ',
            duration,
            "}",
            sep="",
            output_stream=f"file://{self._profile_filename}",
        )
Example #29
 def add_noise(v):
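     # Add zero-mean noise as the difference of two Poisson draws (a Skellam-distributed perturbation).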
     poissons = tf.random.stateless_poisson(
         shape=tf.concat([tf.shape(v), [2]],
                         axis=0),  # Two draws of Poisson.
         seed=tf.cast([tf.timestamp() * 10**6, 0], tf.int64),
         lam=[poisson_lam, poisson_lam],
         dtype=tf.int64)
     return v + tf.cast(poissons[..., 0] - poissons[..., 1], v.dtype)
Example #30
 def initialize_seed():
     """Generate a seed based on the current microsecond timestamp."""
     # tf.timestamp() returns fractional seconds, which will be scaled and
     # quantized into a tf.int64 value for the random state seed.
     scale_factor = 1_000_000.0
     quantized_fractional_seconds = tf.cast(tf.timestamp() * scale_factor,
                                            tf.int64)
     return tf.fill(dims=(2, ), value=quantized_fractional_seconds)