Example #1
0
class TensorBoardReport(chainer.training.Extension):
    """Trainer extension that streams every trainer observation to
    TensorBoard as a scalar.

    A ``SummaryWriter`` may be injected at construction time; otherwise one
    is opened lazily under the trainer's output directory on the first call.
    """

    def __init__(self, writer=None):
        self.writer = writer

    def __call__(self, trainer: chainer.training.Trainer):
        # Lazy initialization so the log directory follows trainer.out.
        if self.writer is None:
            self.writer = SummaryWriter(Path(trainer.out))

        step = trainer.updater.iteration
        for tag, value in trainer.observation.items():
            # Unwrap Variables, then move any device array to host memory.
            if isinstance(value, chainer.Variable):
                value = value.data
            if isinstance(value, chainer.cuda.cupy.ndarray):
                value = chainer.cuda.to_cpu(value)
            self.writer.add_scalar(tag, value, step)
class Tensorboard(extension.Extension):
    """Trainer extension to tensorboard the accumulated results to ABEJA Platform.

    Reads the log accumulated by a :class:`LogReport` extension and writes
    each new entry's values to a ``SummaryWriter``.

    Args:
        entries (list of str): List of keys of observations to print.
            NOTE(review): currently stored but not consulted by ``_print``,
            which writes every key in each log entry.
        out_dir (str): Directory the ``SummaryWriter`` writes to.
        log_report (str or LogReport): Log report to accumulate the
            observations. Either the name of a LogReport extension
            registered to the trainer, or a LogReport instance to use
            internally.
    """

    def __init__(self, entries, out_dir='logs', log_report='LogReport'):
        self._entries = entries
        self._log_report = log_report
        # Index of the first log entry not yet written out.
        self._log_len = 0
        self.writer = SummaryWriter(out_dir)

    def __call__(self, trainer):
        report = self._log_report
        if isinstance(report, str):
            report = trainer.get_extension(report)
        elif isinstance(report, log_report_module.LogReport):
            report(trainer)  # update the internally-held log report
        else:
            raise TypeError('log report has a wrong type %s' %
                            type(report))

        # Emit only the entries appended since the previous invocation.
        log = report.log
        for entry in log[self._log_len:]:
            self._print(entry)
        self._log_len = len(log)

    def serialize(self, serializer):
        report = self._log_report
        if isinstance(report, log_report_module.LogReport):
            report.serialize(serializer['_log_report'])

    def _print(self, observation):
        # The entry's 'epoch' value is used as the global step for every key.
        step = observation['epoch']
        for tag, scalar in observation.items():
            self.writer.add_scalar(tag, scalar, step)
class TensorBoardReport(chainer.training.Extension):
    """Trainer extension that logs scalar observations and parameter
    histograms to TensorBoard via a ``SummaryWriter``.

    Args:
        out_dir: Directory the ``SummaryWriter`` writes event files to.
    """

    def __init__(self, out_dir):
        self.writer = SummaryWriter(out_dir)

    def __call__(self, trainer):
        observations = trainer.observation
        n_iter = trainer.updater.iteration
        for n, v in observations.items():
            if isinstance(v, chainer.Variable):
                v = v.data
            # Bug fix: previously this was an `elif`, so a chainer.Variable
            # holding GPU data reached add_scalar without being copied to
            # host memory. Chaining the checks converts any device array,
            # including one just unwrapped from a Variable (matches the
            # sibling TensorBoardReport implementations in this file).
            if isinstance(v, chainer.backends.cuda.ndarray):
                v = chainer.cuda.to_cpu(v)

            self.writer.add_scalar(n, v, n_iter)

        # Histogram of every parameter of the model optimized by 'main'.
        link = trainer.updater.get_optimizer('main').target
        for name, param in link.namedparams():
            self.writer.add_histogram(name, chainer.cuda.to_cpu(param.data),
                                      n_iter)
Example #4
0
class TensorBoardReport(chainer.training.Extension):
    """Write scalar observations and parameter histograms to TensorBoard.

    A ``SummaryWriter`` can be injected; otherwise one is opened lazily
    under the trainer's output directory on the first call.
    """

    def __init__(self, writer=None):
        self.writer = writer

    def __call__(self, trainer: chainer.training.Trainer):
        if self.writer is None:
            self.writer = SummaryWriter(Path(trainer.out))

        step = trainer.updater.iteration

        # Scalars: every observation reported over the last interval.
        for tag, value in trainer.observation.items():
            if isinstance(value, chainer.Variable):
                value = value.data
            if isinstance(value, chainer.cuda.cupy.ndarray):
                value = chainer.cuda.to_cpu(value)
            self.writer.add_scalar(tag, value, step)

        # Histograms: parameters of the model attached to the 'main' optimizer.
        model = trainer.updater.get_optimizer('main').target
        for name, param in model.namedparams():
            self.writer.add_histogram(name,
                                      chainer.cuda.to_cpu(param.data),
                                      step,
                                      bins=100)
Example #5
0
import math
import chainer
import numpy as np
from datetime import datetime
from tb_chainer import utils, SummaryWriter

# Demo script: exercise scalar / image / audio / histogram / text logging
# against a timestamped run directory.
vgg = chainer.links.VGG16Layers()
writer = SummaryWriter('runs/'+datetime.now().strftime('%B%d  %H:%M:%S'))
sample_rate = 44100
# Note frequencies (C4..A4, then repeated A4) indexed by step // 10.
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]

for step in range(100):
    M_global = np.random.rand(1)  # value to keep
    writer.add_scalar('M_global', M_global[0], step)
    x = np.random.rand(32, 3, 64, 64)  # output from network
    if step % 10 == 0:
        writer.add_image('Image', utils.make_grid(x), step)
        # Two seconds of a pure cosine tone; amplitude stays in [-1, 1].
        t = np.arange(sample_rate * 2, dtype=float)
        x = np.cos(freqs[step // 10] * np.pi * t / float(sample_rate))
        writer.add_audio('Audio', x, step)
        for name, param in vgg.namedparams():
            writer.add_histogram(name, chainer.cuda.to_cpu(param.data), step)
        writer.add_text('Text', 'text logged at step:'+str(step), step)
        writer.add_text('another Text', 'another text logged at step:'+str(step), step)
writer.close()
    # NOTE(review): interior of an unseen enclosing function — looks like a
    # cross-entropy-method training loop on a FrozenLake environment
    # (inferred from the writer comment tag); confirm against the caller.
    writer = SummaryWriter(comment='-frozenlake-nonslippery')

    full_batch = []
    for iter_no, batch in enumerate(iterate_batches(env, net, BATCH_SIZE)):
        # Mean undiscounted reward across the freshly generated episodes.
        reward_mean = float(np.mean([s.reward for s in batch]))
        # Keep only episodes above the PERCENTILE reward bound, merging them
        # with elites carried over from previous iterations.
        full_batch, obs, acts, reward_bound = filter_batch(full_batch + batch, PERCENTILE)
        if not full_batch:
            continue

        obs_v = Variable(np.asarray(obs, dtype=np.float32))
        # NOTE(review): np.int was removed in NumPy 1.24 — this should be a
        # concrete dtype such as np.int64 (or the builtin int).
        acts_v = Variable(np.asarray(acts, dtype=np.int))
        # Cap the elite memory so old episodes eventually age out.
        full_batch = full_batch[-500:]

        action_scores_v = net(obs_v)
        loss_v = F.softmax_cross_entropy(action_scores_v, acts_v)
        # NOTE(review): no cleargrads() visible before backward(); gradients
        # may accumulate across iterations unless cleared elsewhere — confirm.
        loss_v.backward()

        optimizer.update()
        print("%d: loss=%.3f, reward_mean=%.3f, reward_bound=%.3f, length of batch=%d" % (
            iter_no, loss_v.data, reward_mean, reward_bound, len(full_batch)))
        writer.add_scalar("loss", loss_v.data, iter_no)
        writer.add_scalar("reward_mean", reward_mean, iter_no)
        writer.add_scalar("reward_bound", reward_bound, iter_no)

        # Treat the environment as solved once mean reward exceeds 0.8.
        if reward_mean > 0.8:
            print("Solved!")
            break
    writer.close()
Example #7
0
                # (continuation — the head of this expression is outside this
                # chunk; presumably it collects an action value per action)
                for action in range(self.env.action_space.n)
            ]
            # Value-iteration update: keep the best attainable action value.
            self.values[state] = max(state_values)


if __name__ == "__main__":
    # Value-iteration driver: alternate random exploration, a value-iteration
    # sweep, and evaluation episodes until the reward threshold is reached.
    test_env = gym.make(ENV_NAME)
    agent = Agent()
    writer = SummaryWriter(comment='-v-iteration')

    best_reward = 0.0
    iteration = 0
    while True:
        iteration += 1
        agent.play_n_random_steps(100)
        agent.value_iteration()

        # Average return over the evaluation episodes.
        total = 0.0
        for _ in range(TEST_EPISODES):
            total += agent.play_episode(test_env)
        mean_reward = total / TEST_EPISODES

        writer.add_scalar("reward", mean_reward, iteration)
        if mean_reward > best_reward:
            print("Best reward updated %.3f -> %.3f" % (best_reward, mean_reward))
            best_reward = mean_reward
        if mean_reward > 0.80:
            print("Solved in %d iterations!" % iteration)
            break
    writer.close()
Example #8
0
                        [str(float(l.data))
                         for l in model.predictor.loss]) + '\n')
                logf.flush()
                # Move the model back to GPU after the (presumed) to_cpu()
                # above this chunk — confirm against the enclosing loop.
                if args.gpu >= 0: model.to_gpu()

            # Periodic checkpoint: model + optimizer state every args.save steps.
            if (count % args.save) == 0:
                print('save the model')
                serializers.save_npz('models/' + str(count) + '.model', model)
                print('save the optimizer')
                serializers.save_npz('models/' + str(count) + '.state',
                                     optimizer)
                if args.tensorboard:
                    # Log parameter histograms plus the current loss scalar.
                    for name, param in model.predictor.namedparams():
                        writer.add_histogram(name,
                                             chainer.cuda.to_cpu(param.data),
                                             count)
                    writer.add_scalar('loss', float(model.loss.data), count)
            # Feed the previous target back in as the next input —
            # presumably a sequence rollout; verify against the data pipeline.
            x_batch[0] = y_batch[0]
            if count > args.period:
                break
            count += 1

        seq = (seq + 1) % len(sequencelist)

# For logging graph structure
if args.tensorboard:
    # Run one forward pass so a computational graph exists, then dump it.
    inputs = chainer.Variable(xp.asarray(x_batch))
    targets = chainer.Variable(xp.asarray(y_batch))
    model(inputs, targets)
    writer.add_graph(model.y)
    writer.close()