def before_run(self, run_context):
    """Request the first hidden layer's kernel and bias tensors as fetches."""
    layer = 'hidden1'
    weights = tfutils._as_graph_element('%s/kernel:0' % layer)
    biases = tfutils._as_graph_element('%s/bias:0' % layer)
    return SessionRunArgs({'weights': weights, 'biases': biases})
# Exemplo n.º 2
# 0
 def before_run(self, run_context):  # pylint: disable=unused-argument
     """Fetch the tracked tensors only on steps the timer says to trigger."""
     self._should_trigger = self._timer.should_trigger_for_step(
         self._iter_count)
     if not self._should_trigger:
         return None
     return SessionRunArgs(self._current_tensors)
# Exemplo n.º 3
# 0
 def before_run(self, run_context):
     self._request_summary = (self._next_step is None
                              or self._timer.should_trigger_for_step(
                                  self._next_step))
     requests = {"global_step": self._global_step_tensor}
     opts = (tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
             if self._request_summary else None)
     return SessionRunArgs(requests, options=opts)
# Exemplo n.º 4
# 0
    def before_run(self, run_context):  # pylint: disable=unused-argument
        """Fetch the global step plus every tracked tensor.

        Also records (in self._generate) whether this step should produce
        output, based on the step timer.
        """
        requests = {"global_step": self._global_step_tensor}
        # Idiom fix: dict.update replaces the manual key-by-key copy loop.
        requests.update(self._tensors)
        self._generate = (self._next_step is None or
                          self._timer.should_trigger_for_step(self._next_step))
        return SessionRunArgs(requests)
    def before_run(self, run_context):  # pylint: disable=unused-argument
        """Fetch the global step, adding the summary op when one is requested.

        Prefers the hook's own summary op; falls back to the scaffold's.
        """
        fetches = {"global_step": self._global_step_tensor}
        if self._request_summary:
            summary_op = self._summary_op
            if summary_op is None:
                summary_op = self._scaffold.summary_op
            if summary_op is not None:
                fetches["summary"] = summary_op
        return SessionRunArgs(fetches)
# Exemplo n.º 6
# 0
    def before_run(self, run_context):  # pylint: disable=unused-argument
        """On the very first run, dump the graph; always fetch the global step.

        The graph is written both as graph.pbtxt in the checkpoint directory
        and to the summary writer.
        """
        if self._last_saved_time is None:
            graph = ops.get_default_graph()
            training_util.write_graph(graph.as_graph_def(add_shapes=True),
                                      self._checkpoint_dir, "graph.pbtxt")
            self._summary_writer.add_graph(graph)
        return SessionRunArgs(self._global_step_tensor)
# Exemplo n.º 7
# 0
 def before_run(self, run_context):  # pylint: disable=unused-argument
     """On the first training step, notify the XML server that training
     started; always fetch the global step."""
     global task_type, task_index, name, is_training
     if is_training == 0:
         xml_server.serverStarted(task_type, task_index, name)
         is_training = 1  # flip the module-level flag so we only notify once
     return SessionRunArgs(self._global_step_tensor)
# Exemplo n.º 8
# 0
    def before_run(self, run_context):  # pylint: disable=unused-argument
        """Fetch the global step and, until the hook is done, the summary op."""
        self._request_summary = not self._done
        fetches = {"global_step": self._global_step_tensor}
        if self._request_summary and self._get_summary_op() is not None:
            fetches["summary"] = self._get_summary_op()
        return SessionRunArgs(fetches)
# Exemplo n.º 9
# 0
  def before_run(self, run_context):  # pylint: disable=unused-argument
    """Fetch the global step; include the summary op whenever one is due
    (first run, or when the step timer fires)."""
    if self._next_step is None:
      self._request_summary = True
    else:
      self._request_summary = self._timer.should_trigger_for_step(
          self._next_step)
    fetches = {"global_step": self._global_step_tensor}
    if self._request_summary and self._get_summary_op() is not None:
      fetches["summary"] = self._get_summary_op()
    return SessionRunArgs(fetches)
  def before_run(self, run_context):
    """Essentially a copy of before_run as defined in the base class, except we
    don't add the default graph or any meta-graph data to the SummaryWriter"""
    if self._timer.last_triggered_step() is None:
      training_util.write_graph(
          ops.get_default_graph().as_graph_def(add_shapes=True),
          self._checkpoint_dir,
          "graph.pbtxt")
      saver_def = self._get_saver().saver_def if self._get_saver() else None

    return SessionRunArgs(self._global_step_tensor)
# Exemplo n.º 11
# 0
 def before_run(self, run_context):
     """Stamp and log the run start time, and request a full execution trace.

     Fetches nothing extra; the global-step fetch is deliberately disabled.
     """
     self._request_summary = (self._next_step is None
                              or self._timer.should_trigger_for_step(
                                  self._next_step))
     fetches = {}  #{"global_step": self._global_step_tensor}
     trace_opts = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
     self.start_time = time.time()
     stamp = datetime.datetime.utcfromtimestamp(self.start_time)
     date_time = stamp.strftime('%Y-%m-%d %H:%M:%S')
     tf.logging.info(f'Before Run: {date_time}')
     return SessionRunArgs(fetches, options=trace_opts)
# Exemplo n.º 12
# 0
    def before_run(self, run_context):  # pylint: disable=unused-argument
        """On the first call, write graph.pbtxt and publish the graph and
        meta-graph to the summary writer; always fetch the global step."""
        if self._timer.last_triggered_step() is None:
            graph = ops.get_default_graph()
            graph_def = graph.as_graph_def(add_shapes=True)
            training_util.write_graph(graph_def, self._checkpoint_dir,
                                      "graph.pbtxt")
            saver_def = self._saver.saver_def if self._saver else None
            meta_graph_def = meta_graph.create_meta_graph_def(
                graph_def=graph_def, saver_def=saver_def)
            self._summary_writer.add_graph(graph)
            self._summary_writer.add_meta_graph(meta_graph_def)
        return SessionRunArgs(self._global_step_tensor)
# Exemplo n.º 13
# 0
    def before_run(self, run_context):  # pylint: disable=unused-argument
        """Piggyback summary/debug fetches onto the caller's requested fetches.

        Runs the global step eagerly (an extra session.run) to decide whether
        the summary and debug timers fire on the upcoming step. Note this
        mutates run_context.original_args.fetches in place.
        """
        requests = run_context.original_args.fetches
        self._global_steps = run_context.session.run(self._global_step_tensor)
        self._should_sum = self._timer_sum.should_trigger_for_step(
            self._global_steps + 1)
        self._should_debug = self._timer_debug.should_trigger_for_step(
            self._global_steps + 1)

        # Fix: use "is not None" instead of "!= None" (PEP 8; "!=" can be
        # overridden by tensor-like objects and is unidiomatic for None tests).
        if self._timer_sum and self._should_sum and self._summary_op is not None:
            requests["summary"] = self._summary_op
        if (self._timer_debug and self._should_debug
                and self._debug_outputs_map is not None):
            requests["debug_outputs"] = self._debug_outputs_map
        self._last_time = time.time()
        return SessionRunArgs(requests)
# Exemplo n.º 14
# 0
    def before_run(self, run_context):  # pylint: disable=unused-argument
        """Write graph.pbtxt on the first call, then fetch the global step.

        We write the graph at the first call of before_run rather than in
        begin(), since other hooks may change the graph and add variables in
        begin(); the graph is finalized only after all begin() calls.
        """
        if self._timer.last_triggered_step() is None:
            training_util.write_graph(
                ops.get_default_graph().as_graph_def(add_shapes=True),
                self._checkpoint_dir, "graph.pbtxt")
            # Fix: the original also built saver_def and a meta_graph_def here
            # but never used or published them -- the dead computation is
            # removed.
        return SessionRunArgs(self._global_step_tensor)
# Exemplo n.º 15
# 0
    def before_run(self, run_context):
        session_args = run_context.original_args
        fetches = session_args.fetches
        feed_dict = session_args.feed_dict
        options = session_args.options

        # does this work?
        if options:
            options.report_tensor_allocations_upon_oom = True
        else:
            options = tf.RunOptions(report_tensor_allocations_upon_oom=True)
        session_args = SessionRunArgs(fetches=fetches,
                                      feed_dict=feed_dict,
                                      options=options)

        return session_args
# Exemplo n.º 16
# 0
 def before_run(self, run_context):
     if self._timer.last_triggered_step() is None:
         # We do write graph and saver_def at the first call of before_run.
         # We cannot do this in begin, since we let other hooks to change graph and
         # add variables in begin. Graph is finalized after all begin calls.
         training_util.write_graph(
             ops.get_default_graph().as_graph_def(add_shapes=True),
             self._checkpoint_dir, "graph.pbtxt")
         graph = ops.get_default_graph()
         meta_graph_def = meta_graph.create_meta_graph_def(
             graph_def=graph.as_graph_def(add_shapes=True),
             saver_def=self._saver.saver_def)
         self._summary_writer.add_graph(graph)
         self._summary_writer.add_meta_graph(meta_graph_def)
     requests = {"global_steps": self._global_step_tensor}
     return SessionRunArgs(requests)
# Exemplo n.º 17
# 0
  def before_run(self, run_context):  # pylint: disable=unused-argument
    """Print an epoch header and reset the progress bar at epoch boundaries.

    Returns:
      SessionRunArgs fetching the tracked tensors, or None when no tensors
      were registered.
    """
    # Fix: was "if self._first_run is  True:" -- an identity comparison to
    # True (with a stray double space) is fragile and unidiomatic; a plain
    # truthiness test behaves the same for the boolean flag used here.
    if self._first_run:
      self._curr_epoch += 1
      print('Epoch %s/%s:' % (self._curr_epoch, self._epochs))
      self.progbar = Progbar(target=self._step_per_epoch)
      self._first_run = False

    elif self._curr_step % self._step_per_epoch == 0:
      self._curr_epoch += 1
      self._curr_step = 0
      print('Epoch %s/%s:' % (self._curr_epoch, self._epochs))
      self.progbar = Progbar(target=self._step_per_epoch)

    if self._tensors:
      return SessionRunArgs(self._current_tensors)

    # NOTE(review): the guard checks self._tensors but fetches
    # self._current_tensors -- confirm the two attributes stay in sync.
    return None
# Exemplo n.º 18
# 0
    def before_run(self, run_context):  # pylint: disable=unused-argument
        """Decide whether a summary is due this step and request it if so.

        With the custom optimizer and a memory window larger than one, this
        presumably aligns summaries with the last micro-step of each window;
        otherwise the usual first-run-or-timer schedule applies.
        """
        if S("optimizer.use_custom") and S("optimizer.memory_size") > 1:
            self._request_summary = (
                self._iter_count % S("optimizer.memory_size")
                == S("optimizer.memory_size") - 1)
        else:
            self._request_summary = (
                self._next_step is None
                or self._timer.should_trigger_for_step(self._next_step))

        fetches = {"global_step": self._global_step_tensor}
        if self._request_summary and self._get_summary_op() is not None:
            fetches["summary"] = self._get_summary_op()
        return SessionRunArgs(fetches)
# Exemplo n.º 19
# 0
 def before_run(self, run_context):
     # For the first run, record a SessionLog.START at the pre-run global step.
     if self._current_step is None:
         self._current_step = run_context.session.run(
             self._global_step_tensor)
         with ops.default_session(run_context.session):
             self._summary_writer.add_session_log(
                 SessionLog(status=SessionLog.START), self._current_step)
     requests = {"global_step": self._global_step_tensor}
     self._request_summary = self._timer.should_trigger_for_step(
         self._current_step)
     if self._request_summary:
         self._timer.update_last_triggered_step(self._current_step)
         if self._get_summary_op() is not None:
             requests["summary"] = self._get_summary_op()
     feeds = {}
     if self._placeholder is not None and self._request_summary:
         feeds[self._placeholder] = self._request_summary
     args = SessionRunArgs(fetches=requests, feed_dict=feeds)
     return args
# Exemplo n.º 20
# 0
    def before_run(self, run_context):
        self._run_begin = time.time()
        if self._step > 0 and self._step % self._every_n_steps == 0:
            arg_map = {}

            for name in [
                    self._images_name, self._labels_name, self._filenames_name,
                    self._raw_images_name, self._heat_map_features_name,
                    self._probs_name
            ]:
                if name is not None:
                    try:
                        arg_map[
                            name] = basic_session_run_hooks._as_graph_element(
                                name)
                    except Exception as e:
                        if not self.is_logged:
                            tf.logging.error('{} error {}'.format(name, e))
                            self.is_logged = True

            arg_map['global_step'] = self._global_step_tensor
            return SessionRunArgs(arg_map)
# Exemplo n.º 21
# 0
 def before_run(self, run_context):  # pylint: disable=unused-argument
     """Fetch the tracked tensors on every N-th iteration only."""
     on_schedule = self._iter_count % self._every_n_iter == 0
     return SessionRunArgs(self._current_tensors) if on_schedule else None
# Exemplo n.º 22
# 0
 def before_run(self, run_context):
     return SessionRunArgs(fetches=self._fetches)
# Exemplo n.º 23
# 0
 def before_run(self, run_context):
     return SessionRunArgs([self._global_step_tensor, self.tensor])
# Exemplo n.º 24
# 0
 def before_run(self, run_context):  # pylint: disable=unused-argument
     """Start the step timer; every N-th iteration also fetch the averaging
     ops (otherwise fetch nothing)."""
     self._tic = time.time()
     on_schedule = self._step % self._every_n_iter == 0
     return SessionRunArgs(fetches=self._avg_ops) if on_schedule else None
# Exemplo n.º 25
# 0
 def before_run(self, run_context):  # pylint: disable=unused-argument
     """Fetch the step, generated sequence, labels and discriminator score."""
     fetches = [self.global_step_tensor, self.fake_seq, self.labels,
                self.d_score]
     return SessionRunArgs(fetches)
# Exemplo n.º 26
# 0
 def before_run(self, run_context):
     del run_context
     return SessionRunArgs(
         fetches={KMeansClustering.LOSS_OP_NAME: self._loss_tensor})
# Exemplo n.º 27
# 0
 def before_run(self, run_context):
     feed_dict = self.build_feed_dict(graph=run_context.session.graph)
     return SessionRunArgs(fetches=None, feed_dict=feed_dict)
# Exemplo n.º 28
# 0
 def before_run(self, run_context):
     return SessionRunArgs(self.early_stop_tensor)
# Exemplo n.º 29
# 0
 def before_run(self, run_context):  # pylint: disable=unused-argument
     """Fetch the loss tensor on every run."""
     return SessionRunArgs(fetches=self._loss_tensor)
# Exemplo n.º 30
# 0
 def before_run(self, run_context):
     return SessionRunArgs(
         fetches=[],  # no extra fetches
         options=tf.RunOptions(report_tensor_allocations_upon_oom=True))