def test_draw_loss_curve():
    """Smoke-test apc_od.draw_loss_curve on both log formats.

    Feeds it (1) a supervised training log (loss + accuracy per epoch,
    ``no_acc=False``) and (2) an unsupervised log (loss only,
    ``no_acc=True``).  Passes if neither call raises.
    """
    import os  # local import: only needed to close mkstemp's fd

    def _tmpfile():
        # tempfile.mktemp() (used originally) is deprecated and race-prone:
        # another process can claim the returned name before we open it.
        # mkstemp() creates the file atomically; we close the fd right away
        # and just use the path.
        fd, path = tempfile.mkstemp()
        os.close(fd)
        return path

    # log of supervised training
    supervised_log = '''\
2015-12-01 21:33:06,665 [INFO] epoch:00; train mean loss=inf; accuracy=0.793402775527;
2015-12-01 21:33:08,554 [INFO] epoch:00; test mean loss=0.0223661520096; accuracy=0.99583333234;
2015-12-01 21:33:36,166 [INFO] epoch:01; train mean loss=0.267008397272; accuracy=0.943055551189;
2015-12-01 21:33:38,054 [INFO] epoch:01; test mean loss=0.0319371443122; accuracy=0.99027777546;
2015-12-01 21:34:05,669 [INFO] epoch:02; train mean loss=0.162450927842; accuracy=0.967013884543;
2015-12-01 21:34:07,559 [INFO] epoch:02; test mean loss=0.00178737615885; accuracy=0.99861111078;
2015-12-01 21:34:35,193 [INFO] epoch:03; train mean loss=0.25237462795; accuracy=0.968055551458;
2015-12-01 21:34:37,084 [INFO] epoch:03; test mean loss=2.72755467041e-05; accuracy=1.0;
2015-12-01 21:35:04,718 [INFO] epoch:04; train mean loss=0.270737466685; accuracy=0.967361107469;
2015-12-01 21:35:06,612 [INFO] epoch:04; test mean loss=0.0172848692339; accuracy=0.99444444312;
2015-12-01 21:35:37,238 [INFO] epoch:05; train mean loss=0.138665390847; accuracy=0.978124997682;
2015-12-01 21:35:39,126 [INFO] epoch:05; test mean loss=1.31297177821e-07; accuracy=1.0;
'''
    log_file = _tmpfile()
    with open(log_file, 'w') as f:
        f.write(supervised_log)
    apc_od.draw_loss_curve(log_file, _tmpfile(), no_acc=False)

    # log of unsupervised training
    unsupervised_log = '''\
2015-11-24 23:57:03,540 [INFO] epoch:00; train mean loss=0.00389542509668;
2015-11-24 23:57:07,646 [INFO] epoch:00; test mean loss=0.000338642162872;
2015-11-24 23:57:25,875 [INFO] epoch:01; train mean loss=0.000212499197854;
2015-11-24 23:57:30,128 [INFO] epoch:01; test mean loss=0.000117461651098;
2015-11-24 23:57:48,356 [INFO] epoch:02; train mean loss=8.24619589366e-05;
2015-11-24 23:57:52,628 [INFO] epoch:02; test mean loss=5.45797932015e-05;
2015-11-24 23:58:10,803 [INFO] epoch:03; train mean loss=4.39608514929e-05;
2015-11-24 23:58:15,035 [INFO] epoch:03; test mean loss=3.45692531482e-05;
2015-11-24 23:58:33,109 [INFO] epoch:04; train mean loss=2.96583379774e-05;
2015-11-24 23:58:37,317 [INFO] epoch:04; test mean loss=2.53010121924e-05;
2015-11-24 23:58:55,482 [INFO] epoch:05; train mean loss=2.26222944017e-05;
2015-11-24 23:58:59,687 [INFO] epoch:05; test mean loss=2.04618284746e-05;
'''
    log_file = _tmpfile()
    with open(log_file, 'w') as f:
        f.write(unsupervised_log)
    apc_od.draw_loss_curve(log_file, _tmpfile(), no_acc=True)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Command-line driver: draw one loss curve image per loss id from a log."""

import argparse
import os.path

from apc_od import draw_loss_curve


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-n', '--id-num', type=int, required=True,
                        help='number of loss id')
    parser.add_argument('--log-file', type=str, required=True,
                        help='log filename')
    parser.add_argument('--out-file', type=str, required=True,
                        help='output filename')
    parser.add_argument('--no-acc', action='store_true', help='no accuracy')
    args = parser.parse_args()

    n_id = args.id_num
    log_file = args.log_file
    out_file = args.out_file
    no_acc = args.no_acc

    # Bug fix: the original passed the same out_file for every loss id, so
    # each iteration overwrote the previous curve and only the last id's
    # image survived.  Give each id its own file by inserting the id before
    # the extension ("curve.jpg" -> "curve0.jpg", "curve1.jpg", ...).
    # For n_id == 1 the user-supplied name is kept unchanged, so the
    # single-curve behavior is fully backward-compatible.
    root, ext = os.path.splitext(out_file)
    for i in xrange(n_id):
        out_file_i = out_file if n_id == 1 else '{0}{1}{2}'.format(root, i, ext)
        draw_loss_curve(i, log_file, out_file_i, no_acc)
def main_loop(self, n_epoch=10, save_interval=None, save_encoded=True):
    """Run the train/test loop for ``n_epoch`` epochs.

    Per epoch: one training pass and one test pass over the cropped
    dataset, logging mean loss (and accuracy when ``self.is_supervised``).
    Every ``save_interval`` epochs the model, optimizers and a sample
    batch are dumped to ``self.log_dir``; for unsupervised (autoencoder)
    runs the reconstruction and optionally the encoded representation are
    tiled to images as well.  Finally one loss curve per optimizer is
    drawn from ``self.log_file``.

    :param n_epoch: number of epochs to train.
    :param save_interval: epochs between checkpoints; defaults to
        ``n_epoch // 10`` but never less than 1.
    :param save_encoded: also tile the encoded batch (unsupervised only).
    """
    # Bug fix: the original `save_interval or (n_epoch // 10)` became 0 for
    # n_epoch < 10 (and discarded an explicit 0), which made the
    # `epoch % save_interval` check below raise ZeroDivisionError.
    save_interval = save_interval or max(1, n_epoch // 10)
    train_data = get_raw(which_set='train')
    test_data = get_raw(which_set='test')
    N_train = len(train_data.filenames)
    N_test = len(test_data.filenames)
    logging.info('converting dataset to x and t data')
    train_x, train_t = self.dataset_to_xt_data(train_data, self.crop_roi)
    test_x, test_t = self.dataset_to_xt_data(test_data, self.crop_roi)
    for epoch in xrange(0, n_epoch):
        # train pass: accumulate per-loss-id sums over the whole epoch
        sum_loss, sum_accuracy, _, _ = \
            self.batch_loop(train_x, train_t, train=True)
        for loss_id, sl in sorted(sum_loss.items()):
            mean_loss = sl / N_train
            msg = 'epoch:{:02d}; train mean loss{}={};'\
                .format(epoch, loss_id, mean_loss)
            if self.is_supervised:
                mean_accuracy = sum_accuracy / N_train
                msg += ' accuracy={};'.format(mean_accuracy)
            logging.info(msg)
            print(msg)
        # test pass: also keeps the last input/output batch for saving below
        sum_loss, sum_accuracy, x_batch, y_batch = \
            self.batch_loop(test_x, test_t, train=False)
        for loss_id, sl in sorted(sum_loss.items()):
            mean_loss = sl / N_test
            msg = 'epoch:{:02d}; test mean loss{}={};'\
                .format(epoch, loss_id, mean_loss)
            if self.is_supervised:
                mean_accuracy = sum_accuracy / N_test
                msg += ' accuracy={};'.format(mean_accuracy)
            logging.info(msg)
            print(msg)
        # save model and input/encoded/decoded
        if epoch % save_interval == (save_interval - 1):
            print('epoch:{:02d}; saving'.format(epoch))
            # save model
            model_path = osp.join(
                self.log_dir,
                '{name}_model_{epoch}.h5'.format(
                    name=self.model_name, epoch=epoch))
            serializers.save_hdf5(model_path, self.model)
            # save optimizer state, one file per optimizer
            for i, opt in enumerate(self.optimizers):
                opt_path = osp.join(
                    self.log_dir,
                    '{name}_optimizer_{epoch}_{i}.h5'.format(
                        name=self.model_name, epoch=epoch, i=i))
                serializers.save_hdf5(opt_path, opt)
            # save x_data (last test batch input)
            x_path = osp.join(self.log_dir, 'x_{}.pkl'.format(epoch))
            with open(x_path, 'wb') as f:
                pickle.dump(x_batch, f)  # save x
            if not self.is_supervised:
                # autoencoder run: y_batch is the reconstruction of x_batch
                x_hat_path = osp.join(
                    self.log_dir, 'x_hat_{}.pkl'.format(epoch))
                with open(x_hat_path, 'wb') as f:
                    pickle.dump(y_batch, f)  # save x_hat
                tile_ae_inout(
                    x_batch, y_batch,
                    osp.join(self.log_dir, 'X_{}.jpg'.format(epoch)))
                if save_encoded:
                    x = Variable(cuda.to_gpu(x_batch), volatile=True)
                    z = self.model.encode(x)
                    tile_ae_encoded(
                        cuda.to_cpu(z.data),
                        osp.join(self.log_dir,
                                 'x_encoded_{}.jpg'.format(epoch)))
    # NOTE(review): reconstructed from whitespace-mangled source; whether
    # this curve-drawing loop originally ran once after training (as here)
    # or inside each save interval could not be determined — confirm
    # against version control.
    for i in xrange(len(self.optimizers)):
        draw_loss_curve(
            loss_id=i,
            logfile=self.log_file,
            outfile=osp.join(self.log_dir, 'loss_curve{}.jpg'.format(i)),
            no_acc=not self.is_supervised,
        )