Example No. 1
import tensorflow as tf

def train_epoch(model,
                optimizer,
                dataset,
                epoch_index: int,
                batch_index: tf.Variable,
                log_freq: int = 250,
                writer=None):

    to_fine_tune = list(model.trainable_variables)

    epoch_metrics = make_metric_dict(
        ["Localization", "Confidence", "WeightedTotal"])
    era_metrics = make_metric_dict(
        ["Localization", "Confidence", "WeightedTotal"])

    for met in era_metrics.values():
        met.reset_states()

    epoch_samples = 0
    era_samples = 0
    _log("Started new training epoch")

    batch_start = batch_index.numpy()
    for batch in dataset:
        batch_index.assign_add(1)
        epoch_samples += len(batch["image"])
        era_samples += len(batch["image"])

        keys = [
            "cls_targets", "cls_weights", "reg_targets", "reg_weights",
            "matched"
        ]
        images, shapes = model.preprocess(batch["image"])
        model.provide_groundtruth_direct(**{k: batch[k] for k in keys})
        with tf.GradientTape() as tape:
            prediction_dict = model.predict(images, shapes)
            loss_dict = model.loss(prediction_dict)
        gradients = tape.gradient(loss_dict["WeightedTotal"], to_fine_tune)
        optimizer.apply_gradients(zip(gradients, to_fine_tune))
        update_metric_dict(epoch_metrics, loss_dict)
        update_metric_dict(era_metrics, loss_dict)

        completed = batch_index.numpy() - batch_start
        if completed % log_freq == 0:
            _log(f"Completed {completed} batches")
            if writer:
                l_dict = metric2scalar_dict(
                    era_metrics,
                    prefix=f"Loss/Train/Last_{log_freq}_Batches",
                    v_func=lambda v: v / era_samples,
                    reset_states=True)
                write_scalars(writer, l_dict, step=batch_index)
                era_samples = 0  # restart the per-era sample count alongside the metric reset

    if writer:
        l_dict = metric2scalar_dict(epoch_metrics,
                                    prefix=f"Loss/Train/Epoch",
                                    v_func=lambda v: v / epoch_samples,
                                    reset_states=True)
        write_scalars(writer, l_dict, step=epoch_index)
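
A minimal sketch of how train_epoch might be driven. The optimizer choice, epoch count, and log directory below are assumptions for illustration; model and dataset are expected to come from the surrounding project, along with the helpers used above (make_metric_dict, update_metric_dict, metric2scalar_dict, write_scalars, _log).

import tensorflow as tf

# Hypothetical driver loop; `model` and `dataset` are built elsewhere.
optimizer = tf.keras.optimizers.SGD(learning_rate=0.01, momentum=0.9)
batch_index = tf.Variable(0, dtype=tf.int64)  # persists across epochs
writer = tf.summary.create_file_writer("logs/train")  # made-up path

for epoch in range(10):  # made-up epoch count
    train_epoch(model, optimizer, dataset,
                epoch_index=epoch,
                batch_index=batch_index,
                writer=writer)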
Example No. 2
import tensorflow as tf

def _decay_weight_op(self, variable: tf.Variable, learning_rate: float, apply_state: dict):
    """Apply decoupled weight decay to `variable` if it is eligible."""
    do_decay = self._do_use_weight_decay(variable.name)
    if do_decay:
        # Decoupled weight decay shrinks the weight: w <- w - lr * decay_rate * w,
        # so the update must be a subtraction, not an addition.
        return variable.assign_sub(
            learning_rate * apply_state['weight_decay_rate'] * variable,
            use_locking=self._use_locking
        )
    return tf.no_op()
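
The eligibility test _do_use_weight_decay is not shown in the snippet. A minimal sketch of how such a test is commonly written, assuming the optimizer carries an exclude_from_weight_decay list of name patterns (the attribute name and the example patterns are assumptions, not taken from the source):

import re

def _do_use_weight_decay(self, variable_name: str) -> bool:
    """Return True unless the variable name matches an exclusion pattern."""
    # Hypothetical attribute, e.g. ["LayerNorm", "layer_norm", "bias"];
    # biases and normalization parameters are conventionally exempt
    # from weight decay.
    for pattern in getattr(self, "exclude_from_weight_decay", None) or []:
        if re.search(pattern, variable_name) is not None:
            return False
    return True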
Example No. 3
import tensorflow as tf

# Variables of various dtypes. Note that dtype must be passed as a
# keyword argument: the second positional parameter of tf.Variable
# is `trainable`, not `dtype`.
floats = tf.Variable([3.14, 2.72], dtype=tf.float64)
ints = tf.Variable([1, 2, 3], dtype=tf.int32)
complexes = tf.Variable([-1.2j, 1.3 - 4j], dtype=tf.complex64)

# Initialize a variable from a constant with an explicit shape
print(tf.Variable(tf.constant(4.2, shape=(3, 3))))

# Operations on a variable return plain tensors, not new variables
a = tf.Variable(0.)
b = a + 1  # b is a tf.Tensor, as the type() call below shows
print(a)
print(b)

print(type(b))

a.assign_add(1)
print(a)

a.assign_sub(4)
print(a)

# Tensors

# Constant tensor

c = tf.constant([[1, 1, 1], [2, 3, 3], [4, 4, 4]])
print(c.dtype, c.shape)

print(c.numpy())

d = tf.constant([[1, 1, 1], [2, 3, 3], [4, 4, 4]], dtype=tf.float32)
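
The snippet ends after creating d. As a small follow-on illustrating the same theme, the sketch below contrasts immutable constants with variables (the names e and v are made up):

# Constants are immutable; arithmetic produces a new tensor.
e = d + 1.0          # new tensor; d itself is unchanged
print(e.numpy())

# Variables, by contrast, can be updated in place.
v = tf.Variable(d)   # variable initialized from the constant
v.assign(v * 2.0)    # in-place update
print(v.numpy())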