def _get_default_feed_dict(self, batch, is_training): """Reset status if necessary""" feed_dict = super()._get_default_feed_dict(batch, is_training) assert isinstance(batch, DataSet) # (1) If a new sequence begin during training or validation, reset state reset_flag = batch.should_reset_state if hub.supreme_reset_flag is not None: reset_flag = hub.supreme_reset_flag if reset_flag: self.reset_buffers(batch.size, is_training) if is_training and hub.notify_when_reset: console.write_line('- ' * 40) # (2) Decrease batch size if necessary if batch.active_indices is not None: # This signal is set in DataSet.gen_rnn_batches invoked by SequenceSet self.decrease_buffer_size(batch.active_indices, is_training) # BETA: for parallel engine logic if batch.should_partially_reset_state: self.reset_part_buffer(batch.reset_batch_indices, batch.reset_values) # TODO: to be deprecated if hub.notify_when_reset and False: if batch.reset_values is not None: info = [(i, v) for i, v in zip( batch.reset_batch_indices, batch.reset_values)] else: info = batch.reset_batch_indices console.write_line('{}'.format(info)) # (3) Set status buffer to status placeholder feed_dict.update(self._get_rnn_dict(is_training, batch.size)) return feed_dict
def end_round(self, rnd):
  for i, metric in enumerate(self._metrics):
    assert isinstance(metric, Metric) and metric.activated
    if metric.sleep: continue
    console.write_line('Branch {} {}'.format(i + 1, '- ' * 35))
    metric.end_round(rnd)
  console.write_line('- ' * 40)
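# Illustrative sketch (not part of the method above): the per-branch reporting
# pattern in end_round, using a hypothetical DummyMetric that models only the
# attributes consulted there (activated, sleep, end_round).
class DummyMetric:
  def __init__(self, name, sleep=False):
    self.name = name
    self.activated = True
    self.sleep = sleep

  def end_round(self, rnd):
    print('{}: round {} done'.format(self.name, rnd))

def report_round(metrics, rnd):
  for i, metric in enumerate(metrics):
    assert metric.activated
    if metric.sleep: continue              # sleeping branches are skipped
    print('Branch {} {}'.format(i + 1, '- ' * 35))
    metric.end_round(rnd)
  print('- ' * 40)

report_round([DummyMetric('loss'), DummyMetric('accuracy', sleep=True)], rnd=3)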
def _get_default_feed_dict(self, batch, is_training):
  feed_dict = Feedforward._get_default_feed_dict(self, batch, is_training)
  if self.master is Recurrent:
    assert isinstance(batch, DataSet)
    # If a new sequence begins while training, reset state
    if is_training:
      if batch.should_reset_state:
        if hub.notify_when_reset: console.write_line('- ' * 40)
        self.reset_state(batch.size)
      if batch.should_partially_reset_state:
        if hub.notify_when_reset:
          if batch.reset_values is not None:
            info = [(i, v) for i, v in zip(
              batch.reset_batch_indices, batch.reset_values)]
          else:
            info = batch.reset_batch_indices
          console.write_line('{}'.format(info))
        self.reset_part_state(batch.reset_batch_indices, batch.reset_values)

    # If not training, always feed a zero state to the model
    batch_size = None if is_training else batch.size
    feed_dict.update(self._get_state_dict(batch_size=batch_size))

  return feed_dict
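# Illustrative sketch (not part of the method above): the effect of
# `batch_size = None if is_training else batch.size` -- while training the
# stored state buffer is fed back in, otherwise a zero state sized to the
# current batch is used. `get_state_dict` is a hypothetical NumPy analogue of
# _get_state_dict, assuming the state is one array keyed by a single placeholder.
import numpy as np

STATE_DIM = 8
state_buffer = np.random.randn(4, STATE_DIM)  # persistent state across batches

def get_state_dict(batch_size=None):
  if batch_size is None:
    state = state_buffer                       # training: reuse stored state
  else:
    state = np.zeros((batch_size, STATE_DIM))  # evaluation: fresh zero state
  return {'state_placeholder': state}

print(get_state_dict()['state_placeholder'].shape)              # (4, 8)
print(get_state_dict(batch_size=2)['state_placeholder'].shape)  # (2, 8)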
def _snapshot(self, progress):
  if self._snapshot_function is None: return

  filename = 'train_{}_episode'.format(self.counter)
  fullname = "{}/{}".format(self.snapshot_dir, filename)
  self._snapshot_function(fullname)

  console.clear_line()
  console.write_line("[Snapshot] snapshot saved to {}".format(filename))
  console.print_progress(progress=progress)
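# Illustrative sketch (not part of the method above): _snapshot only requires
# self._snapshot_function to be a callable taking the full output path. Below is
# a hypothetical snapshot function that writes a loss curve with matplotlib; the
# commented assignment at the end is an assumption about how it would be
# registered, not an API guarantee.
import matplotlib
matplotlib.use('Agg')  # headless backend so snapshots work on a server
import matplotlib.pyplot as plt

def save_training_curve(fullname, losses=(1.0, 0.6, 0.4, 0.3)):
  fig, ax = plt.subplots()
  ax.plot(range(len(losses)), losses)
  ax.set_xlabel('episode')
  ax.set_ylabel('loss')
  fig.savefig('{}.png'.format(fullname))
  plt.close(fig)

# e.g. agent._snapshot_function = save_training_curve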
def show_notes(self):
  console.section('Notes')
  console.write_line(self._note.content)