Example #1
        def optimizer_function(loss, *_):
            # Extra pipeline-stage outputs beyond the loss are ignored.
            if not self.trainable_weights:
                raise ValueError(
                    "Model must have at least one trainable parameter.")

            opt = self._get_optimizer()
            # Pair the optimizer with the loss it should minimize.
            return pipelining_ops.OptimizerFunctionOutput(opt, loss)
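These callbacks are not invoked directly: they are handed to the IPU pipelining API, which calls them with the outputs of the final pipeline stage. A minimal wiring sketch, assuming hypothetical stage functions stage1/stage2 and pre-built infeed/outfeed queues (the exact keyword, e.g. gradient_accumulation_count versus the older pipeline_depth, depends on the Graphcore SDK version):

from tensorflow.python.ipu.ops import pipelining_ops

pipeline_op = pipelining_ops.pipeline(
    computational_stages=[stage1, stage2],   # hypothetical stage functions
    gradient_accumulation_count=8,           # keyword name varies by SDK version
    infeed_queue=infeed_queue,               # assumed pre-built IPU queues
    outfeed_queue=outfeed_queue,
    optimizer_function=optimizer_function)   # any of the examples on this page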
Example #2
 def optimizer_function(learning_rate, total_loss, *args):
     optimizer = get_optimizer(learning_rate, opts['loss_scaling'],
                               opts['replicas'], opts)
     if opts["replicas"] > 1:
         # Sum gradients across replicas before applying the update.
         optimizer = ipu.optimizers.cross_replica_optimizer.CrossReplicaOptimizer(
             optimizer)
     # The loss is scaled up here; get_optimizer receives the same factor
     # so the optimizer can compensate when applying updates.
     return pipelining_ops.OptimizerFunctionOutput(
         optimizer, total_loss * opts['loss_scaling'])
Example #3
    def optimizer_function(self, lr, loss, kl_cls, tgt):
        # kl_cls and tgt are extra pipeline-stage outputs, unused here.
        optimizer_type = self.config['optimizer'].lower()
        # Scale the loss up to keep fp16 gradients from underflowing.
        loss = self.config['loss_scale'] * loss
        if optimizer_type == 'sgd':
            # For plain SGD, dividing the learning rate by the loss scale
            # cancels the scaling, so the applied update is unchanged.
            lr = lr / self.config['loss_scale']
            optimizer = tf.compat.v1.train.GradientDescentOptimizer(lr)
        elif optimizer_type == 'sgdm':
            optimizer = tf.compat.v1.train.MomentumOptimizer(lr, 0.9)
        elif optimizer_type == 'adam':
            optimizer = tf.compat.v1.train.AdamOptimizer(lr,
                                                         beta1=0.9,
                                                         beta2=0.98,
                                                         epsilon=1e-6)
        elif optimizer_type == 'adaml':
            # Project-defined Adam variant that receives the loss scale
            # directly.
            optimizer = AdamLossScalingOptimizer(lr,
                                                 self.config['loss_scale'],
                                                 weights_dtype=tf.float32)
        else:
            raise ValueError(f"Optimizer {optimizer_type} not implemented.")
        if self.config['replica'] > 1:
            # Sum gradients across replicas before applying the update.
            optimizer = ipu.optimizers.cross_replica_optimizer.CrossReplicaOptimizer(
                optimizer)

        return pipelining_ops.OptimizerFunctionOutput(optimizer, loss)
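Examples 2 and 3 both multiply the loss by a constant before returning it. This is static loss scaling: it keeps small fp16 gradients from underflowing, and because every gradient picks up the same factor, dividing the learning rate by it (as the 'sgd' branch does) leaves the applied update unchanged. A self-contained sketch with a made-up scale factor:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

S = 128.0                                  # hypothetical loss-scale factor
w = tf.Variable(1.0)
loss = tf.square(w)                        # toy loss: dL/dw = 2w
train = tf.train.GradientDescentOptimizer(0.1 / S).minimize(S * loss)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train)
    print(sess.run(w))                     # 0.8, identical to lr=0.1 on the unscaled loss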
Example #4
 def optimizer_function(global_step, loss):
   # Linearly decay the learning rate from 0.1 as global_step grows.
   lr = 0.1 - 0.001 * global_step
   opt = gradient_descent.GradientDescentOptimizer(lr)
   return pipelining_ops.OptimizerFunctionOutput(opt, loss)
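One caveat with this schedule: 0.1 - 0.001 * global_step turns negative once global_step passes 100. A clamped variant (a sketch, not from the original source; assumes global_step is a scalar tensor):

lr = tf.maximum(0.1 - 0.001 * tf.cast(global_step, tf.float32), 1e-4)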
Example #5
 def optimizer_function(loss):
   # learning_rate is captured from the enclosing scope.
   opt = gradient_descent.GradientDescentOptimizer(learning_rate)
   return pipelining_ops.OptimizerFunctionOutput(opt, loss)
Example #6
 def optimizer_function(loss, _, __, lr):
     # Two intermediate stage outputs are ignored; get_optimizer(opts)
     # returns an optimizer class, instantiated here with the learning rate.
     optimizer = get_optimizer(opts)(lr)
     return pipelining_ops.OptimizerFunctionOutput(
         optimizer, loss * opts["loss_scaling"])
Example #7
 def optimizer_function(lr, loss):
     optimizer = GradientDescentOptimizer(lr)
     return pipelining_ops.OptimizerFunctionOutput(optimizer, loss)
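Across all seven variants the contract is identical: build a tf.train-style optimizer, optionally wrap it, scale the loss if needed, and return both in a pipelining_ops.OptimizerFunctionOutput. The data-parallel wrapping used in examples 2 and 3 is itself a one-liner; a sketch assuming a Graphcore TensorFlow build where tensorflow.python.ipu is importable:

import tensorflow.compat.v1 as tf
from tensorflow.python import ipu
from tensorflow.python.ipu.ops import pipelining_ops

def optimizer_function(loss):
    base = tf.train.GradientDescentOptimizer(0.01)
    # Sum gradients across IPU replicas before the weight update.
    optimizer = ipu.optimizers.cross_replica_optimizer.CrossReplicaOptimizer(base)
    return pipelining_ops.OptimizerFunctionOutput(optimizer, loss)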