Example #1
File: rnn.py  Project: orech/returnn
def initBackendEngine():
    BackendEngine.select_engine(config=config)
    if BackendEngine.is_theano_selected():
        print("Theano:", describe_theano_version(), file=log.v3)
        import TheanoUtil
        TheanoUtil.monkey_patches()
    elif BackendEngine.is_tensorflow_selected():
        print("TensorFlow:", describe_tensorflow_version(), file=log.v3)
        if get_tensorflow_version_tuple()[0] == 0:
            print("Warning: TF <1.0 is not supported and likely broken.",
                  file=log.v2)
        if os.environ.get("TF_DEVICE"):
            print("Devices: Use %s via TF_DEVICE instead of %s." %
                  (os.environ.get("TF_DEVICE"),
                   config.opt_typed_value("device")),
                  file=log.v4)
            config.set("device", os.environ.get("TF_DEVICE"))
        if config.is_true("use_horovod"):
            import socket
            import horovod.tensorflow as hvd
            from TFUtil import init_horovod
            init_horovod()  # make sure it is initialized
            if "gpu" in config.value("device", "") or os.environ.get(
                    "CUDA_VISIBLE_DEVICES", ""):
                # We assume that we want to use a GPU.
                gpu_opts = config.typed_dict.setdefault("tf_session_opts",
                                                        {}).setdefault(
                                                            "gpu_options", {})
                assert "visible_device_list" not in gpu_opts
                gpu_opts["visible_device_list"] = str(hvd.local_rank())
                print("Horovod: Hostname %s, pid %i, using GPU %s." %
                      (socket.gethostname(), os.getpid(),
                       gpu_opts["visible_device_list"]),
                      file=log.v3)
            else:
                if hvd.rank() == 0:  # Don't spam in all ranks.
                    print("Horovod: Not using GPU.", file=log.v3)
            horovod_reduce_type = config.value("horovod_reduce_type", "")
            if horovod_reduce_type == "":
                horovod_reduce_type = "grad"
                config.set("horovod_reduce_type", horovod_reduce_type)
            else:
                assert horovod_reduce_type in [
                    "grad", "param"
                ], "config option 'horovod_reduce_type' invalid"
            if hvd.rank() == 0:  # Don't spam in all ranks.
                print("Horovod: Reduce type:",
                      horovod_reduce_type,
                      file=log.v3)
        from TFUtil import debugRegisterBetterRepr, setup_tf_thread_pools, print_available_devices
        tf_session_opts = config.typed_value("tf_session_opts", {})
        assert isinstance(tf_session_opts, dict)
        # This must be done after the Horovod logic, such that we only touch the devices we are supposed to touch.
        setup_tf_thread_pools(log_file=log.v3, tf_session_opts=tf_session_opts)
        # Print available devices. Also make sure that get_tf_list_local_devices uses the correct TF session opts.
        print_available_devices(tf_session_opts=tf_session_opts, file=log.v2)
        debugRegisterBetterRepr()
    else:
        raise NotImplementedError
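The Horovod branch in Example #1 is driven entirely by config entries and environment variables visible in the code above: "use_horovod", "device" (or the TF_DEVICE environment variable), "horovod_reduce_type", and "tf_session_opts". The sketch below lists those entries as plain Python assignments, the way a Python-style RETURNN config is commonly written; the file format itself is an assumption here, since the example only shows the reading side.

use_horovod = True                  # enables the horovod.tensorflow (hvd) branch
device = "gpu"                      # "gpu" in this value (or CUDA_VISIBLE_DEVICES set) selects the GPU path
horovod_reduce_type = "grad"        # "" defaults to "grad"; "param" is the only other accepted value
tf_session_opts = {
    "gpu_options": {},              # "visible_device_list" is filled in later with str(hvd.local_rank())
}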
Example #2
File: rnn.py  Project: ZhangAustin/returnn
def initBackendEngine():
    BackendEngine.select_engine(config=config)
    if BackendEngine.is_theano_selected():
        print("Theano:", describe_theano_version(), file=log.v3)
    elif BackendEngine.is_tensorflow_selected():
        print("TensorFlow:", describe_tensorflow_version(), file=log.v3)
        if get_tensorflow_version_tuple()[0] == 0:
            print("Warning: TF <1.0 is not supported and likely broken.",
                  file=log.v2)
        from TFUtil import debugRegisterBetterRepr
        debugRegisterBetterRepr()
    else:
        raise NotImplementedError
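The "TF <1.0" warning in Examples #1, #2, and #4 hinges on the major version number returned by get_tensorflow_version_tuple(). A rough stand-in for that check, built directly on tf.__version__, is sketched below; the real helper may parse the version string differently, so this is illustration only.

import tensorflow as tf

# Rough equivalent of get_tensorflow_version_tuple()[0] == 0:
major = int(tf.__version__.split(".")[0])
if major == 0:
    print("Warning: TF <1.0 is not supported and likely broken.")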
Example #3
def initBackendEngine():
    BackendEngine.select_engine(config=config)
    if BackendEngine.is_theano_selected():
        print >> log.v3, "Theano:", describe_theano_version()
    elif BackendEngine.is_tensorflow_selected():
        print >> log.v3, "TensorFlow:", describe_tensorflow_version()
        from Util import to_bool
        from TFUtil import debugRegisterBetterRepr
        if os.environ.get("DEBUG_TF_BETTER_REPR") and to_bool(
                os.environ.get("DEBUG_TF_BETTER_REPR")):
            debugRegisterBetterRepr()
    else:
        raise NotImplementedError
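Unlike the other variants, Example #3 (an older, Python 2 era version) only registers the better repr when the DEBUG_TF_BETTER_REPR environment variable is both set and truthy according to Util.to_bool. A minimal way to flip that switch from Python before initBackendEngine() runs is shown below; that to_bool accepts "1" as true is an assumption.

import os

# Must be set before initBackendEngine() is called; assumes Util.to_bool
# treats "1" as a true value.
os.environ["DEBUG_TF_BETTER_REPR"] = "1"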
Example #4
File: rnn.py  Project: rwth-i6/returnn
def init_backend_engine():
  """
  Initializes ``engine``, which is either :class:`TFEngine.Engine` or Theano :class:`Engine.Engine`.
  """
  BackendEngine.select_engine(config=config)
  if BackendEngine.is_theano_selected():
    print("Theano:", describe_theano_version(), file=log.v3)
    import TheanoUtil
    TheanoUtil.monkey_patches()
  elif BackendEngine.is_tensorflow_selected():
    print("TensorFlow:", describe_tensorflow_version(), file=log.v3)
    if get_tensorflow_version_tuple()[0] == 0:
      print("Warning: TF <1.0 is not supported and likely broken.", file=log.v2)
    if os.environ.get("TF_DEVICE"):
      print("Devices: Use %s via TF_DEVICE instead of %s." % (
        os.environ.get("TF_DEVICE"), config.opt_typed_value("device")), file=log.v4)
      config.set("device", os.environ.get("TF_DEVICE"))
    if config.is_true("use_horovod"):
      import socket
      # noinspection PyPackageRequirements,PyUnresolvedReferences
      import horovod.tensorflow as hvd
      from TFUtil import init_horovod
      init_horovod()  # make sure it is initialized
      if "gpu" in config.value("device", "") or os.environ.get("CUDA_VISIBLE_DEVICES", ""):
        # We assume that we want to use a GPU.
        gpu_opts = config.typed_dict.setdefault("tf_session_opts", {}).setdefault("gpu_options", {})
        assert "visible_device_list" not in gpu_opts
        gpu_opts["visible_device_list"] = str(hvd.local_rank())
        print("Horovod: Hostname %s, pid %i, using GPU %s." % (
          socket.gethostname(), os.getpid(), gpu_opts["visible_device_list"]), file=log.v3)
      else:
        if hvd.rank() == 0:  # Don't spam in all ranks.
          print("Horovod: Not using GPU.", file=log.v3)
      horovod_reduce_type = config.value("horovod_reduce_type", "")
      if horovod_reduce_type == "":
        horovod_reduce_type = "grad"
        config.set("horovod_reduce_type", horovod_reduce_type)
      else:
        assert horovod_reduce_type in ["grad", "param"], "config option 'horovod_reduce_type' invalid"
      if hvd.rank() == 0:  # Don't spam in all ranks.
        print("Horovod: Reduce type:", horovod_reduce_type, file=log.v3)
    from TFUtil import debug_register_better_repr, setup_tf_thread_pools, print_available_devices
    tf_session_opts = config.typed_value("tf_session_opts", {})
    assert isinstance(tf_session_opts, dict)
    # This must be done after the Horovod logic, such that we only touch the devices we are supposed to touch.
    setup_tf_thread_pools(log_file=log.v3, tf_session_opts=tf_session_opts)
    # Print available devices. Also make sure that get_tf_list_local_devices uses the correct TF session opts.
    print_available_devices(tf_session_opts=tf_session_opts, file=log.v2)
    debug_register_better_repr()
  else:
    raise NotImplementedError
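Examples #1 and #4 let a TF_DEVICE environment variable override the "device" entry from the config. The sketch below isolates that override, with a plain dict standing in for the RETURNN Config object (the dict is an assumption made only for illustration).

import os

config = {"device": "cpu"}  # stand-in for the RETURNN Config object

# Mirrors the TF_DEVICE handling in init_backend_engine():
if os.environ.get("TF_DEVICE"):
    print("Devices: Use %s via TF_DEVICE instead of %s." % (
        os.environ["TF_DEVICE"], config["device"]))
    config["device"] = os.environ["TF_DEVICE"]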