Пример #1
0
 def __init__(self, model, **kwargs):
     """Build a TensorFlow language-model trainer around *model*.

     :param model: a language model exposing ``create_loss()`` and
         ``create_test_loss()`` graph-building methods
     :param kwargs: forwarded verbatim to ``optimizer``; also read here:
         ``nsteps`` (int, default 500) — presumably the step interval for
         progress reporting, TODO confirm against the training loop
     """
     super(LanguageModelTrainerTf, self).__init__()
     self.model = model
     # Build separate loss ops for training and evaluation.
     self.loss = model.create_loss()
     self.test_loss = model.create_test_loss()
     # `optimizer` (module-level helper) wires the training op and returns
     # the global-step tensor alongside it.
     self.global_step, self.train_op = optimizer(self.loss, **kwargs)
     self.nsteps = kwargs.get('nsteps', 500)
Пример #2
0
 def __init__(self, model, **kwargs):
     """Build a TensorFlow language-model trainer around *model*.

     :param model: a language model exposing ``create_loss()`` and
         ``create_test_loss()`` graph-building methods
     :param kwargs: forwarded verbatim to ``optimizer``; also read here:
         ``nsteps`` (int, default 500) — presumably the step interval for
         progress reporting, TODO confirm against the training loop
     """
     super(LanguageModelTrainerTf, self).__init__()
     self.model = model
     # Build separate loss ops for training and evaluation.
     self.loss = model.create_loss()
     self.test_loss = model.create_test_loss()
     # `optimizer` (module-level helper) wires the training op and returns
     # the global-step tensor alongside it.
     self.global_step, self.train_op = optimizer(self.loss, **kwargs)
     self.nsteps = kwargs.get('nsteps', 500)
Пример #3
0
 def __init__(self, model, **kwargs):
     """Build a TensorFlow tagger trainer around *model*.

     :param model: a tagger model exposing ``create_loss()``
     :param kwargs: forwarded verbatim to ``optimizer``; also read here:
         ``span_type`` (str, default ``'iob'``) — span-labeling scheme
         passed to the evaluator; ``verbose`` (bool, default False);
         ``nsteps`` (int, default ``six.MAXSIZE``, i.e. effectively never)
     """
     super(TaggerTrainerTf, self).__init__()
     self.loss = model.create_loss()
     self.model = model
     span_type = kwargs.get('span_type', 'iob')
     verbose = kwargs.get('verbose', False)
     # Evaluation (e.g. span-level scoring) is delegated to a dedicated helper.
     self.evaluator = TaggerEvaluatorTf(model, span_type, verbose)
     # `optimizer` wires the training op and returns the global-step tensor.
     self.global_step, self.train_op = optimizer(self.loss, **kwargs)
     # Default of MAXSIZE disables intermediate reporting unless overridden.
     self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
Пример #4
0
 def __init__(self, model, **kwargs):
     """Build a TensorFlow tagger trainer around *model*.

     :param model: a tagger model exposing ``create_loss()``
     :param kwargs: forwarded verbatim to ``optimizer``; also read here:
         ``span_type`` (str, default ``'iob'``) — span-labeling scheme
         passed to the evaluator; ``verbose`` (bool, default False);
         ``nsteps`` (int, default ``six.MAXSIZE``, i.e. effectively never)
     """
     super(TaggerTrainerTf, self).__init__()
     self.loss = model.create_loss()
     self.model = model
     span_type = kwargs.get('span_type', 'iob')
     verbose = kwargs.get('verbose', False)
     # Evaluation (e.g. span-level scoring) is delegated to a dedicated helper.
     self.evaluator = TaggerEvaluatorTf(model, span_type, verbose)
     # `optimizer` wires the training op and returns the global-step tensor.
     self.global_step, self.train_op = optimizer(self.loss, **kwargs)
     # Default of MAXSIZE disables intermediate reporting unless overridden.
     self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
Пример #5
0
 def __init__(self, model, **kwargs):
     """Build a TensorFlow seq2seq trainer around *model*.

     :param model: a seq2seq model exposing ``sess``, ``create_loss()`` and
         ``create_test_loss()``
     :param kwargs: forwarded verbatim to ``optimizer``; required keys:
         ``tgt_rlut`` (reverse lookup table for the target vocabulary —
         presumably index→token, TODO confirm) and ``basedir`` (output
         directory). Optional: ``nsteps`` (default 500), ``beam``
         (beam-search width, default 10).
     :raises KeyError: if ``tgt_rlut`` or ``basedir`` is missing
     """
     super(Seq2SeqTrainerTf, self).__init__()
     # Reuse the model's own TF session rather than creating a new one.
     self.sess = model.sess
     self.loss = model.create_loss()
     self.test_loss = model.create_test_loss()
     self.model = model
     self.tgt_rlut = kwargs['tgt_rlut']
     self.base_dir = kwargs['basedir']
     # colocate_gradients_with_ops keeps each gradient on the same device
     # as its forward op (relevant for multi-device graphs).
     self.global_step, self.train_op = optimizer(self.loss, colocate_gradients_with_ops=True, **kwargs)
     self.nsteps = kwargs.get('nsteps', 500)
     self.beam = kwargs.get('beam', 10)
Пример #6
0
 def __init__(self, model, **kwargs):
     """Build a TensorFlow language-model trainer around *model*.

     :param model: a language model exposing ``create_loss()`` and
         ``create_test_loss()``
     :param kwargs: forwarded verbatim to ``optimizer``; also read here:
         ``eval_mode`` (bool, default False) — when True, skip building
         the training op entirely; ``nsteps`` (int, default 500)
     """
     super(LanguageModelTrainerTf, self).__init__()
     self.model = model
     self.loss = model.create_loss()
     self.test_loss = model.create_test_loss()
     if kwargs.get('eval_mode', False):
         # When reloaded a model creating the training op will break things.
         # Substitute a no-op so downstream code can still "run" a train op.
         self.train_op = tf.no_op()
     else:
         self.global_step, self.train_op = optimizer(self.loss, **kwargs)
     self.nsteps = kwargs.get('nsteps', 500)
Пример #7
0
 def __init__(self, model, **kwargs):
     """Build a TensorFlow seq2seq trainer around *model*.

     :param model: a seq2seq model exposing ``sess``, ``create_loss()`` and
         ``create_test_loss()``
     :param kwargs: forwarded verbatim to ``optimizer``; required keys:
         ``tgt_rlut`` (reverse lookup table for the target vocabulary —
         presumably index→token, TODO confirm) and ``basedir`` (output
         directory). Optional: ``nsteps`` (default 500), ``beam``
         (beam-search width, default 10).
     :raises KeyError: if ``tgt_rlut`` or ``basedir`` is missing
     """
     super(Seq2SeqTrainerTf, self).__init__()
     # Reuse the model's own TF session rather than creating a new one.
     self.sess = model.sess
     self.loss = model.create_loss()
     self.test_loss = model.create_test_loss()
     self.model = model
     self.tgt_rlut = kwargs['tgt_rlut']
     self.base_dir = kwargs['basedir']
     # colocate_gradients_with_ops keeps each gradient on the same device
     # as its forward op (relevant for multi-device graphs).
     self.global_step, self.train_op = optimizer(
         self.loss, colocate_gradients_with_ops=True, **kwargs)
     self.nsteps = kwargs.get('nsteps', 500)
     self.beam = kwargs.get('beam', 10)
Пример #8
0
 def __init__(self, model, **kwargs):
     """Build a TensorFlow classifier trainer around *model*.

     :param model: a classifier model exposing ``sess``, ``create_loss()``
         and ``create_test_loss()``
     :param kwargs: forwarded verbatim to ``optimizer``; also read here:
         ``nsteps`` (int, default ``six.MAXSIZE``) and ``ema_decay``
         (float or None) — when given, an exponential moving average of
         the weights is maintained alongside training
     """
     super(ClassifyTrainerTf, self).__init__()
     self.sess = model.sess
     self.loss = model.create_loss()
     self.test_loss = model.create_test_loss()
     self.model = model
     # Keep the raw train op in a local; it may be wrapped below to also
     # trigger the EMA update.
     self.global_step, train_op = optimizer(self.loss, colocate_gradients_with_ops=True, **kwargs)
     self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
     decay = kwargs.get('ema_decay', None)
     if decay is not None:
         self.ema = True
         # `_add_ema` returns the EMA-update op plus ops to swap EMA weights
         # in (for eval) and restore the raw weights afterwards.
         ema_op, self.ema_load, self.ema_restore = _add_ema(model, float(decay))
         # The control dependency makes every run of train_op also run the
         # EMA update; tf.identity gives a single op handle for both.
         with tf.control_dependencies([ema_op]):
             self.train_op = tf.identity(train_op)
     else:
         self.ema = False
         self.train_op = train_op
Пример #9
0
 def __init__(self, model, **kwargs):
     """Build a TensorFlow classifier trainer around *model*.

     :param model: a classifier model exposing ``sess``, ``create_loss()``
         and ``create_test_loss()``
     :param kwargs: forwarded verbatim to ``optimizer``; also read here:
         ``nsteps`` (int, default ``six.MAXSIZE``) and ``ema_decay``
         (float or None) — when given, an exponential moving average of
         the weights is maintained alongside training
     """
     super(ClassifyTrainerTf, self).__init__()
     self.sess = model.sess
     self.loss = model.create_loss()
     self.test_loss = model.create_test_loss()
     self.model = model
     # Keep the raw train op in a local; it may be wrapped below to also
     # trigger the EMA update.
     self.global_step, train_op = optimizer(self.loss, colocate_gradients_with_ops=True, **kwargs)
     self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
     decay = kwargs.get('ema_decay', None)
     if decay is not None:
         self.ema = True
         # `_add_ema` returns the EMA-update op plus ops to swap EMA weights
         # in (for eval) and restore the raw weights afterwards.
         ema_op, self.ema_load, self.ema_restore = _add_ema(model, float(decay))
         # The control dependency makes every run of train_op also run the
         # EMA update; tf.identity gives a single op handle for both.
         with tf.control_dependencies([ema_op]):
             self.train_op = tf.identity(train_op)
     else:
         self.ema = False
         self.train_op = train_op
Пример #10
0
    def __init__(self, model, **kwargs):
        """Build a TensorFlow seq2seq trainer around *model*.

        :param model: a seq2seq model exposing ``sess``, ``create_loss()``
            and ``create_test_loss()``
        :param kwargs: forwarded verbatim to ``optimizer``; required keys:
            ``tgt_rlut`` (reverse lookup table for the target vocabulary —
            presumably index→token, TODO confirm) and ``basedir`` (output
            directory). Optional: ``eval_mode`` (bool, default False) —
            skip building loss and training ops when reloading a saved
            model; ``nsteps`` (default 500); ``beam`` (beam width,
            default 10).
        :raises KeyError: if ``tgt_rlut`` or ``basedir`` is missing
        """
        super(Seq2SeqTrainerTf, self).__init__()
        # Reuse the model's own TF session rather than creating a new one.
        self.sess = model.sess
        if kwargs.get('eval_mode', False):
            # When reloading a model things like `decoder.preds` used in the loss are not present.
            # Trying to create the loss would break things.
            self.loss = tf.no_op()
            self.test_loss = tf.no_op()
        else:
            self.loss = model.create_loss()
            self.test_loss = model.create_test_loss()

        self.model = model
        self.tgt_rlut = kwargs['tgt_rlut']
        self.base_dir = kwargs['basedir']
        if kwargs.get('eval_mode', False):
            # When reloaded a model creating the training op will break things.
            self.train_op = tf.no_op()
        else:
            # colocate_gradients_with_ops keeps each gradient on the same
            # device as its forward op (relevant for multi-device graphs).
            self.global_step, self.train_op = optimizer(
                self.loss, colocate_gradients_with_ops=True, **kwargs)
        self.nsteps = kwargs.get('nsteps', 500)
        self.beam = kwargs.get('beam', 10)