        x_batch = x_train[indexes[i:i+batchsize]]
        loss = da.train(x_batch)
        sum_loss += float(loss.data) * batchsize

        # Dump the computational graph once, on the very first batch
        if epoch == 0 and i == 0:
            with open('../output/da/graph.dot', 'w') as o:
                o.write(computational_graph.build_computational_graph((loss, )).dump())
            with open('../output/da/graph.wo_split.dot', 'w') as o:
                g = computational_graph.build_computational_graph((loss, ), remove_split=True)
                o.write(g.dump())

    print 'train mean loss={}'.format(sum_loss / N)
    losses += [sum_loss / N]

    # Evaluation
    sum_loss = 0
    for i in xrange(0, N_test, batchsize):
        x_batch = x_test[i:i+batchsize]
        loss = da.test(x_batch)
        sum_loss += float(loss.data) * batchsize
    print 'test mean loss={}'.format(sum_loss / N_test)
    losses += [sum_loss / N_test]
    all_loss.append(losses)

    # Visualize the encoder weights after each epoch
    h = MinMaxScaler().fit_transform(da.model.encoder.W)
    visualize(h, '../img/da/w/da_{0:04d}.jpg'.format(epoch + 1), (8, 8), (10, 10))

# Save the model
pickle.dump(da, open('../output/da/model.pkl', 'wb'))
# Save the loss history
pickle.dump(all_loss, open('../output/da/loss.pkl', 'wb'))

# Visualization
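As an aside, the two .dot files dumped above are plain Graphviz sources, so they can be rendered to images for inspection. A minimal sketch, assuming the Graphviz dot command is installed (the paths match the files written above):

import subprocess

# Render both dumped computational graphs to PNG via Graphviz
for name in ('graph', 'graph.wo_split'):
    subprocess.check_call(['dot', '-Tpng',
                           '../output/da/{}.dot'.format(name),
                           '-o', '../output/da/{}.png'.format(name)])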
for epoch in xrange(n_epoch):
    print 'epoch', epoch

    indexes = np.random.permutation(N)
    losses = []
    sum_loss = 0
    for i in xrange(0, N, batchsize):
        x_batch = x_train[indexes[i:i+batchsize]]
        loss = da1.train(x_batch)
        sum_loss += float(loss.data) * batchsize
    print 'train mean loss={}'.format(sum_loss / N)
    losses += [sum_loss / N]

    # Evaluation
    sum_loss = 0
    for i in xrange(0, N_test, batchsize):
        x_batch = x_test[i:i+batchsize]
        loss = da1.test(x_batch)
        sum_loss += float(loss.data) * batchsize
    print 'test mean loss={}'.format(sum_loss / N_test)
    losses += [sum_loss / N_test]
    all_loss.append(losses)

    # Visualize the first layer's encoder weights after each epoch
    h1 = MinMaxScaler().fit_transform(da1.model.encoder.W)
    visualize(h1, '../img/sda/da1/sda_da1_{0:04d}.jpg'.format(epoch + 1), (8, 8), (10, 10))

pickle.dump(da1, open(da1_filename, 'wb'))
pickle.dump(all_loss, open('../output/sda/loss_da1.pkl', 'wb'))

# Second layer: reuse a saved model if one exists, otherwise pretrain it
da2_filename = '../output/sda/model_da2.pkl'
try:
    da2 = pickle.load(open(da2_filename, 'rb'))
except IOError:
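    # No cached model yet: pretrain the second layer here. Its training
    # inputs are the hidden activations produced by the trained first
    # layer (da1), not the raw pixels; stacking the layers this way is
    # what makes the autoencoders a "stacked" DA.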