def test_image(self):
    """Logging a 3-channel (CHW) image should complete without raising."""
    try:
        with Logger('log_path') as log:
            img = np.random.rand(3, 20, 20)
            log.log_image('test_image', img, 0)
    except Exception as e:
        # Catch Exception, not BaseException: KeyboardInterrupt/SystemExit
        # must propagate. Include the error so the failure is diagnosable.
        self.fail(f'log_image raised: {e}')
def test_update(self):
    """Repeatedly logging a scalar over time should not raise."""
    try:
        with Logger('log_path') as log:
            for i in range(100):
                log.log_scalar('test_update', sin(i / 10) * 10, i)
                # sleep keeps the writes spread out in wall-clock time
                # (presumably so the update can be watched live in
                # TensorBoard — TODO confirm this delay is still wanted)
                sleep(.1)
    except Exception as e:
        # Exception, not BaseException, so interrupts still propagate;
        # surface the original error instead of a bare fail().
        self.fail(f'log_scalar raised: {e}')
def test_shortcut(self):
    """The make_log_scalar shortcut should log successive values cleanly."""
    try:
        with Logger('log_path') as log:
            log_square = log.make_log_scalar('test_shortcut')
            for i in range(100):
                log_square(i ** 2)
                sleep(.1)
    except Exception as e:
        # Exception, not BaseException, so interrupts still propagate;
        # report the underlying error rather than failing silently.
        self.fail(f'make_log_scalar shortcut raised: {e}')
def test_text(self):
    """log_text should accept a plain string, a list, and a nested list."""
    try:
        with Logger('log_path') as log:
            log.log_text('string', 'Test string', 0)
            log.log_text('list', ['A', 'B'], 0)
            log.log_text(
                '2D', [['Some', 'multidimensional'], ['string', 'tensor']], 0)
    except Exception as e:
        # Exception, not BaseException, so interrupts still propagate;
        # include the error message so a failure is diagnosable.
        self.fail(f'log_text raised: {e}')
def test_histogram(self):
    """log_histogram should handle normal, chi-square and stacked samples."""
    try:
        with Logger('log_path') as log:
            for i in range(40):
                log.log_histogram('normal', np.random.normal(0, i + 1, 500), i)
                log.log_histogram('chi', np.random.chisquare(6, 500), i)
                log.log_histogram(
                    'normal/multimodal',
                    np.stack((np.random.normal(0, i + 1, 500),
                              np.random.normal(100, i + 1, 500))), i)
    except Exception as e:
        # Exception, not BaseException, so interrupts still propagate;
        # surface the original error instead of a bare fail().
        self.fail(f'log_histogram raised: {e}')
def train_base(model: Model, batch_iter: BatchIter, n_epochs: int, lr_policy: LearningRatePolicy,
               log_path: str, validate: Callable = None, log_iteration: bool = False):
    """
    Train a given model.

    Parameters
    ----------
    model: Model
        the model to train
    batch_iter: BatchIter
        batch iterator
    n_epochs: int
        number of epochs to train
    lr_policy: LearningRatePolicy
        the learning rate policy
    log_path: str
        the path where the logs will be stored
    validate: callable, optional
        a function that calculates the loss and metrics on the validation set
    log_iteration: bool, optional
        if True, additionally log the per-batch train loss and learning rate
    """
    # val_losses/metrics stay None until the first validate() call; they are
    # passed to lr_policy.epoch_finished even on epochs without validation.
    val_losses, metrics = None, None
    with Logger(log_path) as logger, batch_iter:
        # shortcuts that log consecutive values under a fixed tag
        train_log_write = logger.make_log_scalar('train/batch/loss')
        lr_log_write = logger.make_log_scalar('train/batch/lr')
        for epoch in range(n_epochs):
            # train the model
            train_losses = []
            for inputs in batch_iter:
                # lr is read from the policy each step, so per-step policies
                # (step_finished below) take effect on the very next batch
                train_losses.append(model.do_train_step(*inputs, lr=lr_policy.lr))
                if log_iteration:
                    train_log_write(train_losses[-1])
                    lr_log_write(lr_policy.lr)
                lr_policy.step_finished(train_losses[-1])
            # mean over the batch axis: scalar losses give a scalar,
            # vector losses give a per-component mean
            log_scalar_or_vector(logger, 'train/loss', np.mean(train_losses, axis=0), epoch)
            if validate is not None:
                val_losses, metrics = validate()
                for name, value in metrics.items():
                    log_scalar_or_vector(logger, f'val/metrics/{name}', value, epoch)
                log_scalar_or_vector(logger, 'val/loss', np.mean(val_losses, axis=0), epoch)
            log_scalar_or_vector(logger, 'train/lr', lr_policy.lr, epoch)
            lr_policy.epoch_finished(train_losses=train_losses, val_losses=val_losses, metrics=metrics)
def log_vector(logger: tensorboard_easy.Logger, tag: str, vector, step: int):
    """Log every component of `vector` as a separate scalar: `tag/0`, `tag/1`, ..."""
    for idx, component in enumerate(vector):
        logger.log_scalar(tag=f'{tag}/{idx}', value=component, step=step)
def make_log_vector(logger: tensorboard_easy.Logger, tag: str, first_step: int = 0) -> callable:
    """Return a shortcut that logs consecutive vectors under `tag`, starting at `first_step`."""
    def write(tag, value, step):
        log_vector(logger, tag, value, step)

    # NOTE(review): relies on the private Logger._make_log API — verify it is
    # still available when upgrading tensorboard_easy.
    return logger._make_log(tag, first_step, write)
def test_image_exception(self): with Logger('log_path') as log: with self.assertRaises(TypeError): log.log_image('test_img_ex', np.random.rand(5, 10, 10), 0)