Example #1
0
import math
import chainer
import numpy as np
from datetime import datetime
from tb_chainer import utils, SummaryWriter

# Demo: log scalars, images, audio, parameter histograms and text to
# TensorBoard via tb_chainer's SummaryWriter.
vgg = chainer.links.VGG16Layers()
# One timestamped run directory per launch so runs don't overwrite each other.
writer = SummaryWriter('runs/'+datetime.now().strftime('%B%d  %H:%M:%S'))
sample_rate = 44100  # audio sample rate in Hz
# Tone frequencies in Hz (roughly C4..A4); freqs[n_iter//10] selects one
# tone per image/audio logging step below.
freqs = [262, 294, 330, 349, 392, 440, 440, 440, 440, 440, 440]
for n_iter in range(100):
    M_global = np.random.rand(1)  # value to keep
    writer.add_scalar('M_global', M_global[0], n_iter)
    x = np.random.rand(32, 3, 64, 64)  # output from network
    if n_iter % 10 == 0:
        x = utils.make_grid(x)
        writer.add_image('Image', x, n_iter)
        # Two seconds of a pure cosine tone; amplitude stays in [-1, 1].
        # Vectorized with np.arange instead of filling a zeros buffer in a
        # per-sample Python loop — same values, one C-level pass.
        t = np.arange(sample_rate * 2, dtype=np.float64)
        x = np.cos(freqs[n_iter // 10] * np.pi * t / float(sample_rate))
        writer.add_audio('Audio', x, n_iter)
        # Histogram every VGG16 parameter tensor (moved to CPU first).
        for name, param in vgg.namedparams():
            writer.add_histogram(name, chainer.cuda.to_cpu(param.data), n_iter)
        writer.add_text('Text', 'text logged at step:'+str(n_iter), n_iter)
        writer.add_text('another Text', 'another text logged at step:'+str(n_iter), n_iter)
writer.close()
Example #2
0
            # NOTE(review): this fragment starts mid-scope — the enclosing
            # loop over `i` and the definitions of `model`, `x_batch`,
            # `y_batch`, `loss`, `writer`, `xp`, `args`, `prednet` are
            # outside this view.
            # Feed the network's previous prediction back as the next input.
            x_batch[0] = model.y.data[0].copy()
            if args.gpu >= 0: model.to_gpu()
            # Autoregressively extend the prediction for args.ext extra frames.
            for j in range(args.ext):
                print('extended frameNo:' + str(j + 1))
                loss += model(chainer.Variable(xp.asarray(x_batch)),
                              chainer.Variable(xp.asarray(y_batch)))
                # On the final extended frame, optionally dump intermediate
                # activations of the computational graph to TensorBoard.
                if j == args.ext - 1 and args.tensorboard:
                    g = graph.build_computational_graph([model.y])
                    node_name = NodeName(g.nodes)
                    for n in g.nodes:
                        # Keep only variable nodes that hold data and are not
                        # Parameters — i.e. activations, not weights.
                        if isinstance(n, chainer.variable.VariableNode) and \
                          not isinstance(n._variable(), chainer.Parameter) and n.data is not None:
                            # Log the last batch element as an image grid with
                            # each channel expanded to its own single-channel tile.
                            img = utils.make_grid(
                                np.expand_dims(
                                    chainer.cuda.to_cpu(n.data[-1, ...]), 1))
                            writer.add_image(node_name.name(n), img, i)
                # Drop the backward graph to bound memory, then reset the
                # loss accumulator for the next frame.
                loss.unchain_backward()
                loss = 0
                if args.gpu >= 0: model.to_cpu()
                # Persist the predicted frame to disk.
                write_image(
                    model.y.data[0].copy(),
                    'result/test_' + str(i) + 'y_' + str(j + 1) + '.png')
                x_batch[0] = model.y.data[0].copy()
                if args.gpu >= 0: model.to_gpu()
            # Clear recurrent state before the next sequence.
            prednet.reset_state()
else:
    # NOTE(review): the matching `if` for this branch is above this view,
    # and the `while` body continues past it.
    logf = open('loss.csv', 'w')  # NOTE(review): no close() visible here
    count = 0  # frames processed so far — presumably compared to args.period
    seq = 0    # index into sequencelist
    # Process sequences until args.period iterations have been reached.
    while count < args.period:
        imagelist = load_list(sequencelist[seq], args.root)