Example #1
    def train(self, training_instances, validation_instances=None, metrics=None):
        id_tag = (self.id + ': ') if self.id else ''
        if self.options.verbosity >= 2:
            print(id_tag + 'Training priors')
        self.train_priors(training_instances, listener_data=self.options.listener)

        self.dataset = training_instances
        xs, ys = self._data_to_arrays(training_instances, init_vectorizer=True)
        self._build_model()

        if self.options.verbosity >= 2:
            print(id_tag + 'Training conditional model')
        summary_path = config.get_file_path('losses.tfevents')
        if summary_path:
            writer = summary.SummaryWriter(summary_path)
        else:
            writer = None
        progress.start_task('Iteration', self.options.train_iters)
        for iteration in range(self.options.train_iters):
            progress.progress(iteration)
            self.model.fit(xs, ys, batch_size=self.options.batch_size,
                           num_epochs=self.options.train_epochs,
                           summary_writer=writer, step=iteration * self.options.train_epochs)
            validation_results = self.validate(validation_instances, metrics, iteration=iteration)
            if writer is not None:
                step = (iteration + 1) * self.options.train_epochs
                self.on_iter_end(step, writer)
                for key, value in validation_results.items():
                    tag = 'val/' + key.split('.', 1)[1].replace('.', '/')
                    writer.log_scalar(step, tag, value)
        if writer is not None:
            writer.flush()
        progress.end_task()
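
This example writes per-iteration validation metrics to a TensorFlow-style event file. Stripped of the training specifics, the logging pattern it relies on reduces to the sketch below (a minimal illustration, assuming stanza.monitoring.summary is importable; the path, tag, and values are placeholders):

from stanza.monitoring import summary

# Create a writer for an event file; in the example above, every use
# is guarded in case no output path was configured.
writer = summary.SummaryWriter('losses.tfevents')
for step in range(10):
    # log_scalar(step, tag, value) records one scalar data point.
    writer.log_scalar(step, 'val/loss', 1.0 / (step + 1))
writer.flush()  # push buffered events to disk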
Example #2
    def train(self, training_instances, validation_instances=None, metrics=None,
              keep_params=False):
        id_tag = (self.id + ': ') if self.id else ''
        if self.options.verbosity >= 2:
            print(id_tag + 'Training priors')
        self.train_priors(training_instances, listener_data=self.options.listener)

        self.dataset = training_instances
        xs, ys = self._data_to_arrays(training_instances,
                                      init_vectorizer=not hasattr(self, 'model'))
        if not hasattr(self, 'model') or not keep_params:
            if self.options.verbosity >= 2:
                print(id_tag + 'Building model')
            if keep_params:
                warnings.warn("keep_params was passed, but the model hasn't been built; "
                              "initializing all parameters.")
            self._build_model()
        else:
            if not hasattr(self.options, 'reset_optimizer_vars') or \
                    self.options.reset_optimizer_vars:
                if self.options.verbosity >= 2:
                    print(id_tag + 'Resetting optimizer')
                self.model.reset_optimizer()

        if self.options.verbosity >= 2:
            print(id_tag + 'Training conditional model')
        if hasattr(self, 'writer'):
            writer = self.writer
        else:
            summary_path = config.get_file_path('losses.tfevents')
            if summary_path:
                writer = summary.SummaryWriter(summary_path)
            else:
                writer = None
            self.writer = writer

        if not hasattr(self, 'step_base'):
            self.step_base = 0

        progress.start_task('Iteration', self.options.train_iters)
        for iteration in range(self.options.train_iters):
            progress.progress(iteration)
            self.model.fit(xs, ys, batch_size=self.options.batch_size,
                           num_epochs=self.options.train_epochs,
                           summary_writer=writer,
                           step=self.step_base + iteration * self.options.train_epochs)
            validation_results = self.validate(validation_instances, metrics, iteration=iteration)
            if writer is not None:
                step = self.step_base + (iteration + 1) * self.options.train_epochs
                self.on_iter_end(step, writer)
                for key, value in validation_results.items():
                    tag = 'val/' + key.split('.', 1)[1].replace('.', '/')
                    writer.log_scalar(step, tag, value)

        self.step_base += self.options.train_iters * self.options.train_epochs
        if writer is not None:
            writer.flush()
        progress.end_task()
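
Compared with Example #1, this variant supports calling train() repeatedly: the writer is cached on self so successive runs append to the same event file, and self.step_base carries the global step forward so step numbers stay monotonic across runs. That bookkeeping in isolation looks roughly like this (a sketch; the class name and the constant metric value are illustrative):

class ResumableTrainer(object):
    def train_once(self, writer, train_iters, train_epochs):
        # Initialize the running step offset on first use.
        if not hasattr(self, 'step_base'):
            self.step_base = 0
        for iteration in range(train_iters):
            step = self.step_base + (iteration + 1) * train_epochs
            writer.log_scalar(step, 'val/loss', 0.0)  # placeholder value
        # Advance the offset so the next call continues the numbering.
        self.step_base += train_iters * train_epochs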
Example #3
    def __init__(self, module, loss, optimizer, optimizer_params, vectorizer):
        self.get_options()
        self.module = cu(module)  # cu: project helper (apparently moves the module to the GPU if available)
        self.loss = cu(loss)
        self.optimizer_class = optimizer
        self.optimizer_params = optimizer_params
        self.build_optimizer()
        self.vectorizer = vectorizer
        # Set up event logging only if an output path is configured;
        # later uses must tolerate summary_writer being None.
        summary_path = config.get_file_path('monitoring.tfevents')
        if summary_path:
            self.summary_writer = summary.SummaryWriter(summary_path)
        else:
            self.summary_writer = None
        self.step = 0
        self.last_timestamp = datetime.datetime.now()

    def test_large_scalar(self):
        fs = patcher('stanza.monitoring.summary', '/test')
        open = fs.start()

        writer = summary.SummaryWriter('/test/large_scalar.tfevents')
        writer.log_scalar(1, 'bigvalue', 1.0e39)
        writer.flush()
        with open('/test/large_scalar.tfevents', 'r') as infile:
            events = list(summary.read_events(infile))

        self.assertEqual(len(events), 1)
        self.assertEqual(len(events[0].summary.value), 1)
        self.assertTrue(np.isinf(events[0].summary.value[0].simple_value))

        fs.stop()
    def test_large_hist(self):
        fs = patcher('stanza.monitoring.summary', '/test')
        open = fs.start()

        writer = summary.SummaryWriter('/test/large_hist.tfevents')
        writer.log_histogram(1, 'bighist', np.array(1.0e39))
        writer.flush()
        with open('/test/large_hist.tfevents', 'r') as infile:
            events = list(summary.read_events(infile))

        self.assertEqual(len(events), 1)
        self.assertEqual(len(events[0].summary.value), 1)
        self.assertTrue(events[0].summary.value[0].HasField('histo'),
                        events[0].summary.value[0])

        fs.stop()
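
The tests also exercise the read side of the API via summary.read_events, which yields event records back from a file object. Outside the fake filesystem, a minimal write/read round trip would look like this (the path and tag are placeholders; the .tag field on the read-back value is an assumption based on the standard summary proto):

from stanza.monitoring import summary

writer = summary.SummaryWriter('/tmp/roundtrip.tfevents')
writer.log_scalar(1, 'metric', 0.5)
writer.flush()

with open('/tmp/roundtrip.tfevents', 'r') as infile:
    events = list(summary.read_events(infile))

# One event containing one value, tagged 'metric' (assumed field name).
assert len(events) == 1
assert events[0].summary.value[0].tag == 'metric'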