def show_epoch_summary(network):
    delay_limit = 1.  # delay time in seconds
    prev_summary_time = None
    delay_history_length = 10
    terminal_output_delays = deque(maxlen=delay_history_length)

    table_drawer = table.TableDrawer(
        table.Column(name="Epoch #"),
        table.NumberColumn(name="Train err"),
        table.NumberColumn(name="Valid err"),
        table.TimeColumn(name="Time", width=10),
        stdout=network.logs.write
    )

    table_drawer.start()

    try:
        yield
        while True:
            now = time.time()

            if prev_summary_time is not None:
                time_delta = now - prev_summary_time
                terminal_output_delays.append(time_delta)

            table_drawer.row([
                network.last_epoch,
                network.errors.last() or '-',
                network.validation_errors.last() or '-',
                network.training.epoch_time,
            ])
            prev_summary_time = now

            if len(terminal_output_delays) == delay_history_length:
                prev_summary_time = None
                average_delay = np.mean(terminal_output_delays)

                if average_delay < delay_limit:
                    show_epoch = int(
                        network.training.show_epoch *
                        math.ceil(delay_limit / average_delay)
                    )

                    table_drawer.line()
                    table_drawer.message("Too many outputs in a terminal.")
                    table_drawer.message("Set up logging after each {} epochs"
                                         "".format(show_epoch))
                    table_drawer.line()

                    terminal_output_delays.clear()
                    network.training.show_epoch = show_epoch

            yield

    finally:
        table_drawer.finish()
        network.logs.newline()
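# --- Hedged illustration (not part of the original module) -----------------
# The generator above is driven through the standard generator protocol:
# one next() to draw the table header, one next() per displayed epoch, and
# close() to run the finally-block that finishes the table. The self-contained
# sketch below mimics that protocol with plain print() standing in for
# table.TableDrawer; the function name, output format, and driver loop are
# illustrative assumptions, not the library's actual API.
from collections import deque
import time


def epoch_summary_sketch(delay_limit=1., delay_history_length=10):
    delays = deque(maxlen=delay_history_length)
    prev_time = None
    epoch = 0

    print("epoch | train err")  # stands in for table_drawer.start()

    try:
        yield
        while True:
            now = time.time()

            if prev_time is not None:
                delays.append(now - prev_time)

            epoch += 1
            print("{:>5} | {}".format(epoch, '-'))
            prev_time = now

            if (len(delays) == delay_history_length and
                    sum(delays) / len(delays) < delay_limit):
                print("Too many outputs in a terminal.")
                delays.clear()
                prev_time = None

            yield
    finally:
        print("-- summary finished --")  # stands in for table_drawer.finish()


summary = epoch_summary_sketch(delay_limit=0.5, delay_history_length=3)
next(summary)            # draw the header, pause at the first yield
for _ in range(5):
    next(summary)        # print one row per resumed step
summary.close()          # GeneratorExit -> the finally-block runs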
def test_time_column(self):
    test_cases = [
        Case(input_value=0.1, expected_output='0.1 sec'),
        Case(input_value=1.0, expected_output='1.0 sec'),
        Case(input_value=1.1234, expected_output='1.1 sec'),
        Case(input_value=9.99, expected_output='10.0 sec'),
        Case(input_value=10, expected_output='00:00:10'),
        Case(input_value=70, expected_output='00:01:10'),
        Case(input_value=3680, expected_output='01:01:20'),
    ]

    col1 = table.TimeColumn(name="Test1")

    for case in test_cases:
        self.assertEqual(col1.format_value(case.input_value),
                         case.expected_output)
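# --- Hedged illustration (not part of the original test module) ------------
# The expected strings imply the formatting rule exercised by this test:
# values under 10 seconds are rendered with one decimal place and a "sec"
# suffix, longer values as zero-padded HH:MM:SS. The stand-alone formatter
# below satisfies the same cases; it is a sketch of that rule, not
# necessarily the actual table.TimeColumn implementation.
def format_time_value(value):
    if value < 10:
        return '{:.1f} sec'.format(value)

    hours, remainder = divmod(int(value), 3600)
    minutes, seconds = divmod(remainder, 60)
    return '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds)


assert format_time_value(1.1234) == '1.1 sec'
assert format_time_value(9.99) == '10.0 sec'
assert format_time_value(70) == '00:01:10'
assert format_time_value(3680) == '01:01:20'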
def test_summary_table_slow_training(self):
    with catch_stdout() as out:
        network = algorithms.GradientDescent((2, 3, 1), verbose=True)
        summary = SummaryTable(
            network,
            table_builder=table.TableBuilder(
                table.Column(name="Epoch #"),
                table.NumberColumn(name="Train err", places=4),
                table.NumberColumn(name="Valid err", places=4),
                table.TimeColumn(name="Time", width=10),
                stdout=network.logs.write,
            ),
            delay_limit=0,
            delay_history_length=1,
        )

        for _ in range(3):
            network.training.epoch_time = 0.1
            summary.show_last()

        terminal_output = out.getvalue()
        self.assertNotIn("Too many outputs", terminal_output)
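# --- Hedged illustration (not part of the original test module) ------------
# A hypothetical companion test for the opposite path: if SummaryTable
# mirrors the delay logic of show_epoch_summary above, rows printed
# back-to-back under a generous delay_limit should emit the warning.
# This is a sketch only; it assumes the same helpers (catch_stdout,
# algorithms, table, SummaryTable) and that network.training accepts the
# attributes set below.
def test_summary_table_fast_training(self):
    with catch_stdout() as out:
        network = algorithms.GradientDescent((2, 3, 1), verbose=True)
        summary = SummaryTable(
            network,
            table_builder=table.TableBuilder(
                table.Column(name="Epoch #"),
                table.NumberColumn(name="Train err", places=4),
                table.NumberColumn(name="Valid err", places=4),
                table.TimeColumn(name="Time", width=10),
                stdout=network.logs.write,
            ),
            delay_limit=10,
            delay_history_length=2,
        )

        for _ in range(5):
            network.training.show_epoch = 1
            network.training.epoch_time = 0.1
            summary.show_last()

        terminal_output = out.getvalue()
        self.assertIn("Too many outputs", terminal_output)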
def train(self, input_train, target_train=None, input_test=None,
          target_test=None, epochs=100, epsilon=None,
          summary='table'):
    """
    Train the neural network.

    Parameters
    ----------
    input_train : array-like
    target_train : array-like or None
    input_test : array-like or None
    target_test : array-like or None
    epochs : int
        Defaults to ``100``.
    epsilon : float or None
        Defaults to ``None``.
    summary : {'table', 'inline'}
        Defaults to ``'table'``.
    """
    show_epoch = self.show_epoch
    logs = self.logs
    training = self.training = AttributeKeyDict()

    if epochs <= 0:
        raise ValueError("Number of epochs needs to be greater than 0.")

    if epsilon is not None and epochs <= 2:
        raise ValueError("Network should train at least 3 epochs before "
                         "checking the difference between errors")

    logging_info_about_the_data(self, input_train, input_test)
    logging_info_about_training(self, epochs, epsilon)
    logs.newline()

    if summary == 'table':
        summary = SummaryTable(
            table_builder=table.TableBuilder(
                table.Column(name="Epoch #"),
                table.NumberColumn(name="Train err", places=4),
                table.NumberColumn(name="Valid err", places=4),
                table.TimeColumn(name="Time", width=10),
                stdout=logs.write
            ),
            network=self,
            delay_limit=1.,
            delay_history_length=10,
        )

    elif summary == 'inline':
        summary = InlineSummary(network=self)

    else:
        raise ValueError("`{}` is unknown summary type"
                         "".format(summary))

    iterepochs = create_training_epochs_iterator(self, epochs, epsilon)
    show_epoch = parse_show_epoch_property(self, epochs, epsilon)
    training.show_epoch = show_epoch

    # Storing attributes and methods in local variables prevents
    # repeated __getattr__ calls inside the loop. These variables
    # speed up the loop when the number of iterations is huge.
    training_errors = self.errors
    validation_errors = self.validation_errors

    shuffle_data = self.shuffle_data

    train_epoch = self.train_epoch
    epoch_end_signal = self.epoch_end_signal
    train_end_signal = self.train_end_signal
    on_epoch_start_update = self.on_epoch_start_update

    is_first_iteration = True
    can_compute_validation_error = (input_test is not None)
    last_epoch_shown = 0

    #############################################
    # Compiled Theano function that returns eigenvalues and
    # eigenvectors of a symmetric matrix (used for the Hessian).
    symMatrix = tt.dmatrix("symMatrix")
    symEigenvalues, eigenvectors = tt.nlinalg.eig(symMatrix)
    get_Eigen = theano.function([symMatrix],
                                [symEigenvalues, eigenvectors])
    #############################################

    with logs.disable_user_input():
        for epoch in iterepochs:
            validation_error = None
            epoch_start_time = time.time()

            on_epoch_start_update(epoch)

            if shuffle_data:
                data = shuffle(*as_tuple(input_train, target_train))
                input_train, target_train = data[:-1], data[-1]

            try:
                train_error = train_epoch(input_train, target_train)

                print epoch
                name = str(self)

                if name.split('(')[0] == 'Hessian':
                    # Eigenvalue spectrum of the Hessian after this epoch.
                    H = self.variables.hessian.get_value()
                    ev, _ = get_Eigen(H)

                    print "positive EV ", np.sum(ev > 0)
                    print "Just zero EV", np.sum(ev == 0)
                    # Negative eigenvalues above half of the most negative
                    # one are counted as numerically zero.
                    print "Zero EV ", np.sum(ev == 0) + np.sum(
                        (ev < 0) & (ev > (np.min(ev) / 2.0)))
                    print "Neg EV ", np.sum(ev < 0)
                    print "Max EV ", np.max(ev)
                    print "Min EV ", np.min(ev)

                    # Save the full spectrum for this iteration.
                    s = str(self.itr) + '.npy'
                    np.save(s, ev)

                if can_compute_validation_error:
                    validation_error = self.prediction_error(input_test,
                                                             target_test)

                training_errors.append(train_error)
                validation_errors.append(validation_error)

                epoch_finish_time = time.time()
                training.epoch_time = epoch_finish_time - epoch_start_time

                if epoch % training.show_epoch == 0 or is_first_iteration:
                    summary.show_last()
                    last_epoch_shown = epoch

                if epoch_end_signal is not None:
                    epoch_end_signal(self)

                is_first_iteration = False

            except StopTraining as err:
                # TODO: This notification breaks table view in terminal.
                # I need to show it in a different way.
                logs.message("TRAIN", "Epoch #{} stopped. {}"
                             "".format(epoch, str(err)))
                break

        if epoch != last_epoch_shown:
            summary.show_last()

        if train_end_signal is not None:
            train_end_signal(self)

        summary.finish()
        logs.newline()
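# --- Hedged illustration (not part of the original module) -----------------
# The eigenvalue branch above runs only when repr(self) starts with "Hessian"
# and dumps the full Hessian spectrum to "<itr>.npy" once per epoch
# (self.itr is assumed to be maintained elsewhere). A small sketch for
# inspecting those files after training; the glob pattern and summary
# format are illustrative.
import glob

import numpy as np

for path in sorted(glob.glob('*.npy')):
    ev = np.load(path)
    print("{}: {} negative, {} zero, {} positive eigenvalues".format(
        path, np.sum(ev < 0), np.sum(ev == 0), np.sum(ev > 0)))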
def train(self, input_train, target_train=None, input_test=None,
          target_test=None, epochs=100, epsilon=None,
          summary_type='table'):
    """
    Train the neural network.

    Parameters
    ----------
    input_train : array-like
    target_train : array-like or None
    input_test : array-like or None
    target_test : array-like or None
    epochs : int
        Defaults to ``100``.
    epsilon : float or None
        Defaults to ``None``.
    summary_type : {'table', 'inline'}
        Defaults to ``'table'``.
    """
    show_epoch = self.show_epoch
    logs = self.logs
    training = self.training = AttributeKeyDict()

    if epochs <= 0:
        raise ValueError("Number of epochs needs to be greater than 0.")

    if epsilon is not None and epochs <= 2:
        raise ValueError("Network should train at least 3 epochs before "
                         "checking the difference between errors")

    if summary_type == 'table':
        logging_info_about_the_data(self, input_train, input_test)
        logging_info_about_training(self, epochs, epsilon)
        logs.newline()

        summary = SummaryTable(
            table_builder=table.TableBuilder(
                table.Column(name="Epoch #"),
                table.NumberColumn(name="Train err"),
                table.NumberColumn(name="Valid err"),
                table.TimeColumn(name="Time", width=10),
                stdout=logs.write
            ),
            network=self,
            delay_limit=1.,
            delay_history_length=10,
        )

    elif summary_type == 'inline':
        summary = InlineSummary(network=self)

    else:
        raise ValueError("`{}` is unknown summary type"
                         "".format(summary_type))

    iterepochs = create_training_epochs_iterator(self, epochs, epsilon)
    show_epoch = parse_show_epoch_property(self, epochs, epsilon)
    training.show_epoch = show_epoch

    # Storing attributes and methods in local variables prevents
    # repeated __getattr__ calls inside the loop. These variables
    # speed up the loop when the number of iterations is huge.
    training_errors = self.errors
    validation_errors = self.validation_errors

    shuffle_data = self.shuffle_data

    train_epoch = self.train_epoch
    epoch_end_signal = self.epoch_end_signal
    train_end_signal = self.train_end_signal
    on_epoch_start_update = self.on_epoch_start_update

    is_first_iteration = True
    can_compute_validation_error = (input_test is not None)
    last_epoch_shown = 0

    with logs.disable_user_input():
        for epoch in iterepochs:
            validation_error = np.nan
            epoch_start_time = time.time()

            on_epoch_start_update(epoch)

            if shuffle_data:
                input_train, target_train = shuffle(input_train,
                                                    target_train)

            try:
                train_error = train_epoch(input_train, target_train)

                if can_compute_validation_error:
                    validation_error = self.prediction_error(input_test,
                                                             target_test)

                training_errors.append(train_error)
                validation_errors.append(validation_error)

                epoch_finish_time = time.time()
                training.epoch_time = epoch_finish_time - epoch_start_time

                if epoch % training.show_epoch == 0 or is_first_iteration:
                    summary.show_last()
                    last_epoch_shown = epoch

                if epoch_end_signal is not None:
                    epoch_end_signal(self)

                is_first_iteration = False

            except StopNetworkTraining as err:
                # TODO: This notification breaks table view in terminal.
                # I need to show it in a different way.
                logs.message("TRAIN", "Epoch #{} stopped. {}"
                             "".format(epoch, str(err)))
                break

        if epoch != last_epoch_shown:
            summary.show_last()

        if train_end_signal is not None:
            train_end_signal(self)

        summary.finish()
        logs.newline()
        logs.message("TRAIN", "Training finished")
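# --- Hedged illustration (not part of the original module) -----------------
# Minimal usage sketch for this train() signature with toy XOR data; the
# network architecture matches the one used in the tests above, while the
# data and variable names are illustrative assumptions.
import numpy as np

x_train = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
y_train = np.array([[0], [1], [1], [0]])

network = algorithms.GradientDescent((2, 3, 1), verbose=True)

# Full table output (the default) ...
network.train(x_train, y_train, epochs=100, summary_type='table')

# ... or the compact single-line summary.
network.train(x_train, y_train, epochs=100, summary_type='inline')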
def train(self, input_train, target_train=None, input_test=None,
          target_test=None, epochs=100, epsilon=None,
          summary_type='table'):
    """
    Train the neural network.

    Parameters
    ----------
    input_train : array-like
    target_train : array-like or None
    input_test : array-like or None
    target_test : array-like or None
    epochs : int
        Defaults to ``100``.
    epsilon : float or None
        Defaults to ``None``.
    summary_type : {'table', 'inline'}
        Defaults to ``'table'``.
    """
    show_epoch = self.show_epoch
    logs = self.logs
    training = self.training = AttributeKeyDict()

    if epochs <= 0:
        raise ValueError("Number of epochs needs to be greater than 0.")

    if epsilon is not None and epochs <= 2:
        raise ValueError("Network should train at least 3 epochs before "
                         "checking the difference between errors")

    if summary_type == 'table':
        logging_info_about_the_data(self, input_train, input_test)
        logging_info_about_training(self, epochs, epsilon)
        logs.newline()

        summary = SummaryTable(
            table_builder=table.TableBuilder(
                table.Column(name="Epoch #"),
                table.NumberColumn(name="Train err"),
                table.NumberColumn(name="Valid err"),
                table.TimeColumn(name="Time", width=10),
                stdout=logs.write
            ),
            network=self,
            delay_limit=1.,
            delay_history_length=10,
        )

    elif summary_type == 'inline':
        summary = InlineSummary(network=self)

    else:
        raise ValueError("`{}` is unknown summary type"
                         "".format(summary_type))

    iterepochs = create_training_epochs_iterator(self, epochs, epsilon)
    show_epoch = parse_show_epoch_property(self, epochs, epsilon)
    training.show_epoch = show_epoch

    # Storing attributes and methods in local variables prevents
    # repeated __getattr__ calls inside the loop. These variables
    # speed up the loop when the number of iterations is huge.
    training_errors = self.errors
    validation_errors = self.validation_errors

    shuffle_data = self.shuffle_data

    train_epoch = self.train_epoch
    epoch_end_signal = self.epoch_end_signal
    train_end_signal = self.train_end_signal
    on_epoch_start_update = self.on_epoch_start_update

    is_first_iteration = True
    can_compute_validation_error = (input_test is not None)
    last_epoch_shown = 0

    # Compiled Theano function that returns eigenvalues and
    # eigenvectors of a symmetric matrix (used for the Hessian).
    symMatrix = tt.dmatrix("symMatrix")
    symEigenvalues, eigenvectors = tt.nlinalg.eig(symMatrix)
    get_Eigen = theano.function([symMatrix],
                                [symEigenvalues, eigenvectors])

    # Per-epoch records: training error, number of negative eigenvalues
    # and number of zero eigenvalues of the Hessian.
    epsilon = []
    alpha = []
    alpha0 = []

    with logs.disable_user_input():
        for epoch in iterepochs:
            validation_error = None
            epoch_start_time = time.time()

            on_epoch_start_update(epoch)

            if shuffle_data:
                input_train, target_train = shuffle(input_train,
                                                    target_train)

            try:
                train_error = train_epoch(input_train, target_train)

                H = self.variables.hessian.get_value()
                ev, _ = get_Eigen(H)

                if can_compute_validation_error:
                    validation_error = self.prediction_error(input_test,
                                                             target_test)

                epsilon.append(train_error)
                alpha.append(np.sum(ev < 0))
                alpha0.append(np.sum(ev == 0))

                training_errors.append(train_error)
                validation_errors.append(validation_error)

                epoch_finish_time = time.time()
                training.epoch_time = epoch_finish_time - epoch_start_time

                if epoch % training.show_epoch == 0 or is_first_iteration:
                    summary.show_last()
                    last_epoch_shown = epoch

                if epoch_end_signal is not None:
                    epoch_end_signal(self)

                is_first_iteration = False

            except StopNetworkTraining as err:
                # TODO: This notification breaks table view in terminal.
                # I need to show it in a different way.
                logs.message("TRAIN", "Epoch #{} stopped. {}"
                             "".format(epoch, str(err)))
                break

        if epoch != last_epoch_shown:
            summary.show_last()

        if train_end_signal is not None:
            train_end_signal(self)

        summary.finish()
        logs.newline()

    # Plot the number of negative (red) and zero (blue) Hessian
    # eigenvalues against the training error.
    plt.plot(alpha, epsilon, 'r')
    plt.plot(alpha0, epsilon, 'b')
    plt.xlabel('alpha')
    plt.ylabel('epsilon')

    # Collect the printed representation of the network connection in a
    # variable so it can be used in the output file names.
    capture = StringIO()
    capture.truncate(0)
    save_stdout = sys.stdout
    sys.stdout = capture
    print self.connection
    sys.stdout = save_stdout

    s = capture.getvalue()
    s = s.split('\n')[0]

    class_name = self.class_name()
    filename1 = s + '---' + class_name + '-alpha-epsilon' + '.eps'
    plt.savefig(filename1, format='eps', dpi=1000)

    plt.plot(iterepochs, epsilon)
    plt.xlabel('iterepochs')
    plt.ylabel('epsilon')

    filename2 = s + '---' + class_name + '-epsilon-iterepochs' + '.eps'
    plt.savefig(filename2, format='eps', dpi=1000)