Example #1
 def __init__(self, in_info, out_info, lua_fw_func, lua_bw_func=None, lua_file=None, name=None):
   _init()
   super(TorchWrapperOp, self).__init__()
   self.in_info = make_hashable(in_info)
   self.out_info = make_hashable(out_info)
   self.lua_file = lua_file  # if none, expect inplace definition
   self.lua_fw_func = lua_fw_func
   self.lua_bw_func = lua_bw_func
   self.name = name or "<anonymous>"
   for info in self.in_info + self.out_info:
     assert "ndim" in info
     assert "shape" in info
     assert len(info["shape"]) == info["ndim"]
   for info in self.out_info:
     for s in info["shape"]:
       assert s, "need output shape info or reference, %r" % info
Example #2
 def get_global_instance(cls, sprint_opts):
   sprint_opts = make_hashable(sprint_opts)
   if sprint_opts in cls.global_instances:
     return cls.global_instances[sprint_opts]
   instance = SprintInstancePool(sprint_opts=sprint_opts)
   cls.global_instances[sprint_opts] = instance
   return instance
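
All of these examples use make_hashable() to turn an options dict into something that can serve as a dict key or as a hashable op property. As a rough orientation, a minimal re-implementation of such a helper might look like the sketch below; this is an illustration only, and the real Util.make_hashable in the codebase may handle more types or behave differently.

 def make_hashable_sketch(obj):
   """Recursively freeze dicts/lists/sets into tuples so the result is hashable.
   Illustrative stand-in for Util.make_hashable, not the real implementation."""
   if isinstance(obj, dict):
     # sort the items so that two equal dicts produce the same key
     return tuple(sorted((make_hashable_sketch(k), make_hashable_sketch(v)) for (k, v) in obj.items()))
   if isinstance(obj, (list, tuple)):
     return tuple(make_hashable_sketch(v) for v in obj)
   if isinstance(obj, set):
     return tuple(sorted(make_hashable_sketch(v) for v in obj))
   # assume everything else (str, int, float, None, ...) is hashable already
   return obj

 # The caching pattern from Example #2, using the sketch above:
 _global_instances = {}

 def get_instance(opts):
   key = make_hashable_sketch(opts)     # a plain dict could not be used as a dict key
   if key not in _global_instances:
     _global_instances[key] = object()  # stand-in for SprintInstancePool(sprint_opts=opts)
   return _global_instances[key]

 assert get_instance({"a": 1, "b": [2, 3]}) is get_instance({"b": [2, 3], "a": 1})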
Example #3
 def get_global_instance(cls, sprint_opts):
     sprint_opts = make_hashable(sprint_opts)
     if sprint_opts in cls.global_instances:
         return cls.global_instances[sprint_opts]
     instance = SprintInstancePool(sprint_opts=sprint_opts)
     cls.global_instances[sprint_opts] = instance
     return instance
Example #4
 def __init__(self, in_info, out_info,
              c_fw_code, c_bw_code=None, c_extra_support_code=None, code_version=None,
              grad_input_map=None, name=None):
   """
   :param list[dict(str)] in_info: each dict describes one input var.
     attribs in the dict:
        int ndim: the number of dimensions.
        tuple shape: shape tuple; entries can be None for dimensions that are not fixed statically.
     optional attribs:
       str dtype: "float32" by default.
       bool need_contiguous: false by default.
        int want_inplace: -1 by default. if set to an output index, try to optimize by destroying this input and writing that output in place.
         "dummy_out" is a special value which will add another output.
       bool is_inplace: false by default. whether the optimization was applied.
       str gradient: can be "disconnected". see grad().
        bool bw_input: True by default. whether to also pass this param as an input to the backward pass.
     other attribs are just ignored.
   :param list[dict(str)] out_info: like in_info.
     slightly different behavior for:
       shape: we also allow refs to the in_info in the form (in-idx,dim). see infer_shape().
        need_contiguous/want_inplace: used for bw, in case bw_input == True.
   :param str c_fw_code: C code for forward pass
   :param str c_extra_support_code: C support code (for c_support_code)
   :param str|None c_bw_code: C code for backward pass (for gradient)
   :param tuple[int] code_version: will be returned by c_code_cache_version.
   :param tuple|callable grad_input_map: selection of grad inputs.
     by default, we get all inputs + all outputs + all grad outputs.
   :param str name: name
   """
   super(NativeOp, self).__init__()
   assert isinstance(in_info, (list, tuple))
   assert isinstance(out_info, (list, tuple))
   in_info, out_info, num_dummy_outs = self._resolve_want_inplace_dummy(in_info, out_info)
   self.in_info = make_hashable(in_info)
   self.out_info = make_hashable(out_info)
   self.num_dummy_outs = num_dummy_outs
   self.c_fw_code = c_fw_code
   self.c_bw_code = c_bw_code
   self.c_extra_support_code = self._reduce_c_extra_support_code(c_extra_support_code)
   self.code_version = code_version or ()
   self.name = name or "<anonNativeOp>"
   self.grad_input_map = self._convert_grad_input_map(grad_input_map, len(in_info) + len(out_info) * 2)
   self.destroy_map = self._construct_destroy_map(in_info)
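
Going only by the docstring above, an in_info/out_info pair could look like the following sketch. The dict values are invented for illustration; the final assertions mirror the checks that Example #1 performs on its in_info/out_info.

 # Hypothetical in_info/out_info, built only from the docstring above;
 # the concrete names and shapes are made up for illustration.
 in_info = [
   {"name": "X", "ndim": 3, "shape": (None, None, 5), "dtype": "float32",
    "need_contiguous": True},          # extra attribs like "name" are just ignored
   {"name": "W", "ndim": 2, "shape": (5, 7)},
 ]
 out_info = [
   # output shape entries may reference an input dim in the form (in-idx, dim)
   {"name": "Y", "ndim": 3, "shape": ((0, 0), (0, 1), 7)},
 ]

 # the same sanity checks as in Example #1
 for info in in_info + out_info:
   assert "ndim" in info and "shape" in info
   assert len(info["shape"]) == info["ndim"]
 for info in out_info:
   for s in info["shape"]:
     assert s, "need output shape info or reference, %r" % (info,)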
Example #5
 def _get_optimizer_item_for_opts(self, optimizer_opts, auto_create_new):
   """
   :param dict[str]|str|None optimizer_opts:
   :param bool auto_create_new:
   :return: key, optimizer
   :rtype: (object, tf.train.Optimizer)
   """
   from Util import make_hashable
   key = make_hashable(optimizer_opts)
   if key in self.optimizers:
     return key, self.optimizers[key]
   assert auto_create_new, "no optimizer found for opts %r" % (optimizer_opts,)
   optimizer = self._create_optimizer(optimizer_opts)
   self.optimizers[key] = optimizer
   return key, optimizer
Example #6
def generate_hdf_from_other(opts):
  """
  :param dict[str] opts:
  :return: hdf filename
  :rtype: str
  """
  # See test_hdf_dump.py and tools/hdf_dump.py.
  from Util import make_hashable
  cache_key = make_hashable(opts)
  if cache_key in _hdf_cache:
    return _hdf_cache[cache_key]
  fn = _get_tmp_file(suffix=".hdf")
  from Dataset import init_dataset
  dataset = init_dataset(opts)
  hdf_dataset = HDFDatasetWriter(fn)
  hdf_dataset.dump_from_dataset(dataset)
  hdf_dataset.close()
  _hdf_cache[cache_key] = fn
  return fn
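
The function above is one more instance of the same pattern: freeze the opts dict, look it up in a module-level cache, and only build the expensive artifact (here, an HDF file) on a miss. Written once as a generic decorator, the pattern looks roughly like the sketch below; memoize_by_opts and its internal _freeze helper are inventions for this sketch, not part of the codebase.

 import functools

 def memoize_by_opts(func):
   """Cache func(opts) results, keyed by a frozen copy of the opts dict (sketch)."""
   cache = {}

   def _freeze(obj):  # minimal stand-in for Util.make_hashable
     if isinstance(obj, dict):
       return tuple(sorted((k, _freeze(v)) for (k, v) in obj.items()))
     if isinstance(obj, (list, tuple)):
       return tuple(_freeze(v) for v in obj)
     return obj

   @functools.wraps(func)
   def wrapper(opts):
     key = _freeze(opts)
     if key not in cache:
       cache[key] = func(opts)  # e.g. build the HDF file once per distinct opts
     return cache[key]

   return wrapper

With such a decorator, generate_hdf_from_other would not need the explicit _hdf_cache bookkeeping; the tradeoff is that the cache dict is then hidden inside the closure.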
Example #7
  def get_apply_grads_op(self, loss, var_list):
    """
    :param tf.Tensor loss:
    :param list[tf.Variable] var_list:
    :return: op with all variable updates combined, using the optimizer
    :rtype: tf.Operation
    """
    # The following code is basically extended self.optimizer.minimize(), to optionally modify gradients.
    from Util import make_hashable
    if not var_list:
      return tf.no_op(name="no_grad_vars_no_op")

    grads_and_vars = self._compute_gradients(loss, var_list=var_list)
    if self.config.is_true("use_horovod") and self.config.value("horovod_reduce_type", "") == "grad":
      # noinspection PyPackageRequirements,PyUnresolvedReferences
      import horovod.tensorflow as hvd
      grads_and_vars = [
        (hvd.allreduce(grad, average=self.config.is_true("horovod_avg_grad")) if grad is not None else None, var)
        for (grad, var) in grads_and_vars]

    var_grads = {var: grad for (grad, var) in grads_and_vars if grad is not None}
    if not var_grads:
      raise Exception("no single variable to train")
    global_info = self._GetGlobalInfo(optimizer=self, all_vars=var_list, var_grads=var_grads)
    if self.config.bool_or_other("debug_grad_summaries", False):
      tf.summary.scalar("global_grad_norm", global_info.get_global_grad_norm())
    grads_per_apply_grad_opts = {}  # dict apply_grad_opts -> list of (grad, var)
    for grad, var in grads_and_vars:
      assert var in var_list
      if grad is None:
        continue
      new_grad, apply_grad_opts = self._post_process_grad(grad=grad, var=var, global_info=global_info)
      grads_per_apply_grad_opts.setdefault(make_hashable(apply_grad_opts), []).append((new_grad, var))

    all_apply_grads = []
    assert grads_per_apply_grad_opts
    for apply_grad_opts, grads_and_vars_per_opts in grads_per_apply_grad_opts.items():
      all_apply_grads.append(self._apply_gradients(grads_and_vars_per_opts, **apply_grad_opts))
    if len(all_apply_grads) == 1:
      return all_apply_grads[0]
    return tf.group(*all_apply_grads)
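
The last loop above is the part that needs make_hashable: the per-variable option dicts returned by _post_process_grad are frozen so that variables with equal options land in the same bucket and share one _apply_gradients() call. A stripped-down, self-contained illustration of just that grouping step follows; the variable names and option keys are invented for this sketch.

 def freeze(d):  # minimal stand-in for Util.make_hashable
   return tuple(sorted(d.items())) if isinstance(d, dict) else d

 grads_and_vars = [("g0", "var0"), ("g1", "var1"), ("g2", "var2")]
 per_var_opts = {
   "var0": {"grad_clip": 5.0},
   "var1": {"grad_clip": 1.0},
   "var2": {"grad_clip": 5.0},
 }

 grads_per_opts = {}  # frozen opts -> list of (grad, var)
 for grad, var in grads_and_vars:
   key = freeze(per_var_opts[var])  # a dict itself cannot be a dict key
   grads_per_opts.setdefault(key, []).append((grad, var))

 # var0 and var2 share identical opts, so they end up in one bucket
 # and would be handled by a single _apply_gradients() call
 assert grads_per_opts[freeze({"grad_clip": 5.0})] == [("g0", "var0"), ("g2", "var2")]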
Example #8
 def __init__(self, sprint_opts):
   super(SprintAlignmentAutomataOp, self).__init__()
   self.sprint_opts = make_hashable(sprint_opts)
   self.sprint_instance_pool = None  # type: typing.Optional[SprintInstancePool]
Example #9
 def __init__(self, sprint_opts):
   super(SprintErrorSigOp, self).__init__()
   self.sprint_opts = make_hashable(sprint_opts)
   self.sprint_instance_pool = None  # type: typing.Optional[SprintInstancePool]
   self.debug_perform_time = None
Example #10
 def __init__(self, sprint_opts):
     super(SprintAlignmentAutomataOp, self).__init__()
     self.sprint_opts = make_hashable(sprint_opts)
     self.sprint_instance_pool = None  # type: typing.Optional[SprintInstancePool]
Example #11
 def __init__(self, sprint_opts):
     super(SprintErrorSigOp, self).__init__()
     self.sprint_opts = make_hashable(sprint_opts)
     self.sprint_instance_pool = None
     self.debug_perform_time = None
Example #12
 def __init__(self, target, sprint_opts):
   super(SprintErrorSigOp, self).__init__()
   self.target = target  # default is "classes"
   self.sprint_opts = make_hashable(sprint_opts)
   self.sprint_instance_pool = None
Example #13
 def __init__(self, sprint_opts):
   super(SprintErrorSigOp, self).__init__()
   self.sprint_opts = make_hashable(sprint_opts)
   self.sprint_instance_pool = None
   self.debug_perform_time = None
Example #14
 def __init__(self, sprint_opts):
     super(SprintAlignmentAutomataOp, self).__init__()
     self.sprint_opts = make_hashable(sprint_opts)
     self.sprint_instance_pool = None
Example #15
 def __init__(self, sprint_opts):
   super(SprintAlignmentAutomataOp, self).__init__()
   self.sprint_opts = make_hashable(sprint_opts)
   self.sprint_instance_pool = None
Example #16
 def __init__(self, sprint_opts):
     super(SprintErrorSigOp, self).__init__()
     self.sprint_opts = make_hashable(sprint_opts)
     self.sprint_instance_pool = None  # type: typing.Optional[SprintInstancePool]
     self.debug_perform_time = None