Example #1
def _execute(command):
    """ Execute the given command in the current directory, print error messages if the command failed.
  Args:
    command Command to execute
  Returns:
    Whether the operation is a success
  """
    with tools.Context(None, "info"):
        sys.stdout.write("Executing " + repr(command) + "...")
        sys.stdout.flush()
    command = subprocess.run(shlex.split(command),
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
    success = command.returncode == 0
    with tools.Context(None, "info" if success else "warning"):
        if success:
            print(" done")
        else:
            print(" fail (" + str(command.returncode) + ")")
        for stdname in ("stdout", "stderr"):
            text = getattr(command, stdname).decode("utf8")
            if len(text) > 0:
                with tools.Context(stdname, "trace"):
                    print(text)
    return success
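For context, here is a minimal standalone sketch of the same run-and-report pattern, without the project's tools.Context logging helper; the function name and the sample command below are illustrative, not from the original source.

import shlex
import subprocess

def run_command(command):
    # Split the command string, run it, and capture both output streams
    result = subprocess.run(shlex.split(command),
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if result.returncode != 0:
        print("fail (%d)" % result.returncode)
        print(result.stderr.decode("utf8"))
    return result.returncode == 0

print(run_command("python3 --version"))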
Example #2
def _loader():
  """ Incrementally rebuild all libraries and register all local operations.
  """
  try:
    # Check if the CUDA compiler is available
    nocuda = True
    for path in os.environ["PATH"].split(os.pathsep):
      if (pathlib.Path(path) / _build_cudabin).exists():
        nocuda = False
        break
    # List all common headers
    headers = []
    if _build_include is not None:
      for path in _build_include.iterdir():
        if path.suffix in _build_exts_hdr:
          headers.append(path)
    # Compile libraries and load OP
    doneset = set()
    failset = set()
    for dirpath in pathlib.Path(__file__).resolve().parent.iterdir():
      ident = dirpath.name[:3]
      if dirpath.is_dir() and ident in _loader_hooks.keys(): # Is a library directory
        if dirpath not in doneset and dirpath not in failset:
          so_path = _build_library(dirpath, doneset, failset, headers, nocuda=nocuda)
          loader  = _loader_hooks[ident]
          if so_path is not None and loader is not None: # Successful build and loader needed
            loader(so_path)
  except Exception as err:
    with tools.Context(ident, "warning"):
      print("Loading failed while compiling " + repr(ident) + ": " + str(err))
      with tools.Context("traceback", "trace"):
        traceback.print_exc()
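As a side note, the PATH scan above (looking for the CUDA compiler) can be expressed with the standard library; "nvcc" below is only an assumption for the value of _build_cudabin.

import shutil

# True when no CUDA compiler is reachable through PATH (equivalent to the manual scan above)
nocuda = shutil.which("nvcc") is None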
Example #3
def _loader_ctypes(so_path):
  """ Post-building ctypes loading operations.
  Args:
    so_path Shared object path
  """
  try:
    lib = ctypes.CDLL(str(so_path))
    register_py(so_path.stem[3:], lambda: lib)
  except Exception as err:
    with tools.Context(so_path.stem, "warning"):
      print("Loading failed for python interface " + repr(str(so_path)) + ": " + str(err))
      with tools.Context("traceback", "trace"):
        traceback.print_exc()
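The ctypes pattern used above works against any shared object; a self-contained illustration against the system math library (Linux assumed, not part of the original code):

import ctypes

libm = ctypes.CDLL("libm.so.6")        # load the shared object
libm.cos.restype = ctypes.c_double     # declare the return type
libm.cos.argtypes = [ctypes.c_double]  # declare the argument types
print(libm.cos(0.0))                   # 1.0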
Example #4
 def _run(topdir, name, seed, device, command):
   """ Run the attack experiments with the given named parameters.
   Args:
     topdir  Parent result directory
     name    Experiment unique name
     seed    Experiment seed
     device  Device on which to run the experiments
     command Command to run
   """
   # Add seed to name
   name = "%s-%d" % (name, seed)
   # Process experiment
   with tools.Context(name, "info"):
     finaldir = topdir / name
     # Check whether the experiment was already successful
     if finaldir.exists():
       tools.info("Experiment already processed.")
       return
     # Move-make the pending result directory
     resdir = move_directory(topdir / f"{name}.pending")
     resdir.mkdir(mode=0o755, parents=True)
     # Build the command
     args = command.build(seed, device, resdir)
     # Launch the experiment and write the standard output/error
     tools.trace((" ").join(shlex.quote(arg) for arg in args))
     cmd_res = subprocess.run(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
     if cmd_res.returncode == 0:
       tools.info("Experiment successful")
     else:
       tools.warning("Experiment failed")
       finaldir = topdir / f"{name}.failed"
       move_directory(finaldir)
     resdir.rename(finaldir)
     (finaldir / "stdout.log").write_bytes(cmd_res.stdout)
     (finaldir / "stderr.log").write_bytes(cmd_res.stderr)
Example #5
 def __init__(self, dataset, model, args):
     # Parse key:val arguments
     nbcores = len(os.sched_getaffinity(0))
     if nbcores == 0:
         nbcores = 4  # Arbitrary fallback
     args = tools.parse_keyval(args,
                               defaults={
                                   "batch-size": 32,
                                   "eval-batch-size": 1024,
                                   "weight-decay": 0.00004,
                                   "label-smoothing": 0.,
                                   "labels-offset": 0,
                                   "nb-fetcher-threads": nbcores,
                                   "nb-batcher-threads": nbcores
                               })
     if args["batch-size"] <= 0:
         raise tools.UserException(
             "Cannot make batches of non-positive size")
     # Report experiments
     with tools.Context("slim", None):
         print("Dataset name in use:   " + repr(dataset[0]) + " (in " +
               repr(dataset[1]) + ")")
         print("Dataset preprocessing: " +
               (repr(args["preprocessing"]) if "preprocessing" in
                args else "<model default>"))
         print("Model name in use:     " + repr(model))
     # Finalization
     self.__args = args
     self.__dataset = dataset
     self.__preproc = args[
         "preprocessing"] if "preprocessing" in args else model
     self.__model = model
     self.__cntr_wk = 0  # Worker instantiation counter
     self.__cntr_ev = 0  # Evaluator instantiation counter
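The tools.parse_keyval helper is not included in these examples; judging from how its result is used ("batch-size" compared as a number), a rough sketch of such a "key:val" parser could look like the following (an assumption, not the project's implementation):

def parse_keyval(args, defaults=None):
    # Parse ["key:val", ...] into a dict, converting values to int/float when possible
    parsed = dict(defaults or {})
    for arg in args:
        key, _, val = arg.partition(":")
        for cast in (int, float, str):
            try:
                parsed[key] = cast(val)
                break
            except ValueError:
                continue
    return parsed

print(parse_keyval(["batch-size:64", "preprocessing:cifarnet"], defaults={"batch-size": 32}))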
Example #6
def _loader_ops(so_path):
  """ Post-building custom ops loading operations.
  Args:
    so_path Shared object path
  """
  try:
    lib = tf.load_op_library(str(so_path))
    entries = lib.OP_LIST.ListFields()[0][1]
    try:
      while True:
        opname = entries.pop().ListFields()[0][1]
        opname = _camel_to_snake(opname)
        register_op(opname, getattr(lib, opname))
    except IndexError:
      pass
  except Exception as err:
    with tools.Context(so_path.stem, "warning"):
      print("Loading failed for custom op " + repr(str(so_path)) + ": " + str(err))
      with tools.Context("traceback", "trace"):
        traceback.print_exc()
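The _camel_to_snake helper used above is not shown in these examples; a common regex-based implementation would be (an assumption, not necessarily the project's code):

import re

def camel_to_snake(name):
    # Insert an underscore before every uppercase letter that is not at the start, then lowercase
    return re.sub(r"(?<!^)(?=[A-Z])", "_", name).lower()

print(camel_to_snake("MyCustomOp"))  # my_custom_op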
Example #7
    def _run(name, seed, device, params):
        """ Run the attack experiments with the given named parameters.
        Args:
          name   Experiment unique name
          seed   Experiment seed
          device Device on which to run the experiments
          params Named parameters
        """
        # Add seed to name
        name = "%s-%d" % (name, seed)
        # Process experiment
        with tools.Context(name, "info"):
            # Build and set the result directory
            result_dir = args.data_directory / name
            if result_dir.exists():
                tools.info("Experiment already processed.")
                return
            result_dir.mkdir(mode=0o755, parents=True)
            # Add the missing options
            params["seed"] = str(seed)
            params["device"] = device
            params["result-directory"] = str(result_dir)

            # Launch the experiment and write the standard output/error
            def is_multi_param(param):
                return any(isinstance(param, typ) for typ in (list, tuple))

            def param_to_str(param):
                if is_multi_param(param):
                    return (" ").join(shlex.quote(str(val)) for val in param)
                return shlex.quote(str(param))

            tools.trace("python3 -OO attack.py %s" %
                        (" ").join("--%s %s" % (key, param_to_str(val))
                                   for key, val in params.items()))
            command = ["python3", "-OO", "attack.py"]
            for key, val in params.items():
                command.append("--%s" % (key, ))
                if is_multi_param(val):
                    for subval in val:
                        command.append(str(subval))
                else:
                    command.append(str(val))
            cmd_res = subprocess.run(command,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
            if cmd_res.returncode == 0:
                tools.info("Experiment successful")
            else:
                tools.warning("Experiment failed")
            (result_dir / "stdout.log").write_bytes(cmd_res.stdout)
            (result_dir / "stderr.log").write_bytes(cmd_res.stderr)
Example #8
 def _get_models(self):
   """ Lazy-initialize and return the map '__models'.
   Returns:
     '__models'
   """
   # Fast-path already loaded
   if self.__models is not None:
     return self.__models
   # Initialize the dictionary
   self.__models = dict()
   # Populate this dictionary with TorchVision's models
   for name in dir(torchvision.models):
     if len(name) == 0 or name[0] == "_": # Ignore "protected" members
       continue
     builder = getattr(torchvision.models, name)
     if isinstance(builder, types.FunctionType): # Heuristic
       self.__models["torchvision-%s" % name.lower()] = builder
   # Dynamically add the custom models from subdirectory 'models/'
   def add_custom_models(name, module, _):
     nonlocal self
     # Check if has exports, fallback otherwise
     exports = getattr(module, "__all__", None)
     if exports is None:
       tools.warning("Model module %r does not provide '__all__'; falling back to '__dict__' for name discovery" % name)
       exports = (name for name in dir(module) if len(name) > 0 and name[0] != "_")
     # Register the association 'name -> constructor' for all the models
     exported = False
     for model in exports:
       # Check model name type
       if not isinstance(model, str):
         tools.warning("Model module %r exports non-string name %r; ignored" % (name, model))
         continue
       # Recover instance from name
       constructor = getattr(module, model, None)
        # Check instance is callable (it's only a heuristic...)
       if not callable(constructor):
         continue
       # Register callable with composite name
       exported = True
       fullname = "%s-%s" % (name, model)
       if fullname in self.__models:
         tools.warning("Unable to make available model %r from module %r, as the name %r already exists" % (model, name, fullname))
         continue
       self.__models[fullname] = constructor
     if not exported:
       tools.warning("Model module %r does not export any valid constructor name through '__all__'" % name)
   with tools.Context("models", None):
     tools.import_directory(pathlib.Path(__file__).parent / "models", {"__package__": "%s.models" % __package__}, post=add_custom_models)
   # Return the dictionary
   return self.__models
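Based on the discovery logic above, a custom module dropped into 'models/' is expected to export its constructors through __all__; a minimal sketch (the module name "simple" and the model below are illustrative):

# models/simple.py (hypothetical file)
import torch.nn as nn

__all__ = ["linear"]

def linear(din=784, dout=10):
    # Would be registered by _get_models under the composite name "simple-linear"
    return nn.Linear(din, dout)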
Example #9
 def __init__(self, args):
   # Parse key:val arguments
   args = tools.parse_keyval(args, defaults={"batch-size": 32})
   if args["batch-size"] <= 0:
     raise tools.UserException("Cannot make batches of non-positive size")
   # Report loading
   with tools.Context("mnist", None):
     print("Loading MNIST dataset...")
     raw_data = tf.keras.datasets.mnist.load_data()
   # Finalization
   self.__args     = args
   self.__raw_data = raw_data
   self.__datasets = None
   self.__cntr_wk  = 0 # Worker instantiation counter
   self.__cntr_ev  = 0 # Evaluator instantiation counter
Example #10
def phishing(train=True, batch_size=None, root=None, download=False, *args, **kwargs):
  """ Phishing dataset generator builder.
  Args:
    train      Whether to get the training slice of the dataset
    batch_size Batch size (None or 0 for all in one single batch)
    root       Dataset cache root directory (None for default)
    download   Whether to allow to download the dataset if not cached locally
    ...        Ignored supplementary (keyword-)arguments
  Returns:
    Associated dataset generator
  """
  with tools.Context("phishing", None):
    # Get the raw dataset
    inputs, labels = get_phishing(root or default_root, None if download is None else default_url_phishing)
    # Make and return the associated generator
    return experiments.batch_dataset(inputs, labels, train, batch_size, split=8400)  # 8400 = 2⁴ × 3 × 5² × 7 (should help with divisibility)
Example #11
    def __call__(self, *args, **kwargs):
        try:
            ctx = tools.Context(hexchat.get_context())
            return self.func(ctx, *args, **kwargs)
        except:
            tb = traceback.format_exc()
            if callable(self.on_error):
                try:
                    retval = self.on_error(*args, **kwargs)
                except:
                    tb = traceback.format_exc()
            else:
                retval = self.on_error

            errctx = get_error_context()
            errctx.prnt(tb)
            return retval
Example #12
      trace          Whether to add trace prints for every important step of the computations
    Returns:
      List of loss tensors associated with 'device_models'
    """
    raise NotImplementedError

  def accuracy(self, device_dataset, device_model, trace=False):
    """ Build an accuracy tensor on the specified devices, placement on parameter server by default.
    Args:
      device_dataset Dataset device name/function (same instance between calls if same task, i.e. can use 'is')
      device_model   Model device name/function on the associated task
      trace          Whether to add trace prints for every important step of the computations
    Returns:
      Map of metric string name -> aggregated metric tensor associated with 'device_model'
    """
    raise NotImplementedError

# ---------------------------------------------------------------------------- #
# Experiment register and loader

# Register instance
_register   = tools.ClassRegister("experiment")
itemize     = _register.itemize
register    = _register.register
instantiate = _register.instantiate
del _register

# Load all local modules
with tools.Context("experiments", None):
  tools.import_directory(pathlib.Path(__file__).parent, globals())
Example #13
    from .slim.preprocessing import preprocessing_factory
    from .slim.nets.nets_factory import networks_map

# List available models
models = list(networks_map.keys())
if len(models) == 0:
    raise tools.UserException("no model available in slim package")

# List available datasets
datasets = dict()
dspath = pathlib.Path(__file__).parent / "datasets"
if not dspath.is_dir():
    raise tools.UserException("slim dataset at 'datasets' must be a directory")
for path in dspath.iterdir():
    if not tools.can_access(path, read=True):
        with tools.Context(None, "warning"):
            print("slim dataset " + repr(path.name + "/*") +
                  " in 'datasets' is not read-able and has been ignored")
        continue
    if not path.is_dir():  # Must be after to first check for access rights...
        continue
    datasets[path.name] = str(path)
if len(datasets) == 0:
    raise tools.UserException("no dataset available in slim package")

# Register cross-product models-datasets
for model in models:
    for dspair in datasets.items():
        register("slim-" + model + "-" + dspair[0],
                 SlimExperiment._make(dspair, model))
Example #14
 def _get_datasets(self):
   """ Lazy-initialize and return the map '__datasets'.
   Returns:
     '__datasets'
   """
   global transforms
   # Fast-path already loaded
   if self.__datasets is not None:
     return self.__datasets
   # Initialize the dictionary
   self.__datasets = dict()
   # Populate this dictionary with TorchVision's datasets
   for name in dir(torchvision.datasets):
     if len(name) == 0 or name[0] == "_": # Ignore "protected" members
       continue
     constructor = getattr(torchvision.datasets, name)
     if isinstance(constructor, type): # Heuristic
       def make_builder(constructor, name):
         def builder(root, batch_size=None, shuffle=False, num_workers=1, *args, **kwargs):
           # Try to build the dataset instance
           data = constructor(root, *args, **kwargs)
           assert isinstance(data, torch.utils.data.Dataset), f"Internal heuristic failed: {name!r} was not a dataset name"
           # Ensure there is at least a tensor transformation for each torchvision dataset
           if name not in transforms:
             transforms[name] = torchvision.transforms.ToTensor()
           # Wrap into a loader
           batch_size = batch_size or len(data)
           loader = torch.utils.data.DataLoader(data, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
           # Wrap into an infinite batch sampler generator
           return make_sampler(loader)
         return builder
       self.__datasets[name.lower()] = make_builder(constructor, name)
   # Dynamically add the custom datasets from subdirectory 'datasets/'
   def add_custom_datasets(name, module, _):
     nonlocal self
     # Check if has exports, fallback otherwise
     exports = getattr(module, "__all__", None)
     if exports is None:
       tools.warning(f"Dataset module {name!r} does not provide '__all__'; falling back to '__dict__' for name discovery")
       exports = (name for name in dir(module) if len(name) > 0 and name[0] != "_")
     # Register the association 'name -> constructor' for all the datasets
     exported = False
     for dataset in exports:
       # Check dataset name type
       if not isinstance(dataset, str):
         tools.warning(f"Dataset module {name!r} exports non-string name {dataset!r}; ignored")
         continue
       # Recover instance from name
       constructor = getattr(module, dataset, None)
        # Check instance is callable (it's only a heuristic...)
       if not callable(constructor):
         continue
       # Register callable with composite name
       exported = True
       fullname = f"{name}-{dataset}"
       if fullname in self.__datasets:
         tools.warning(f"Unable to make available dataset {dataset!r} from module {name!r}, as the name {fullname!r} already exists")
         continue
       self.__datasets[fullname] = constructor
     if not exported:
       tools.warning(f"Dataset module {name!r} does not export any valid constructor name through '__all__'")
   with tools.Context("datasets", None):
     tools.import_directory(pathlib.Path(__file__).parent / "datasets", {"__package__": f"{__package__}.datasets"}, post=add_custom_datasets)
   # Return the dictionary
   return self.__datasets
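make_sampler is referenced above but not included in these examples; wrapping a DataLoader into an infinite batch generator is commonly done as follows (an assumption, not the project's actual helper):

def make_sampler(loader):
    # Yield batches forever, restarting the DataLoader once it is exhausted
    while True:
        yield from loader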
Example #15
  # Return the selected function with the associated name
  return func

def register(name, unchecked, check, upper_bound=None, influence=None):
  """ Simple registration-wrapper helper.
  Args:
    name        GAR name
    unchecked   Associated function (see module description)
    check       Parameter validity check function
    upper_bound Compute the theoretical upper bound on the ratio non-Byzantine standard deviation / norm to use this aggregation rule: (n, f, d) -> float
    influence   Attack acceptation ratio function
  """
  global gars
  # Check if name already in use
  if name in gars:
    tools.warning("Unable to register %r GAR: name already in use" % name)
    return
  # Export the selected function with the associated name
  gars[name] = make_gar(unchecked, check, upper_bound=upper_bound, influence=influence)

# Registered rules (mapping name -> aggregation rule)
gars = dict()

# Load all local modules
with tools.Context("aggregators", None):
  tools.import_directory(pathlib.Path(__file__).parent, globals())

# Bind/overwrite the GAR name with the associated rules in globals()
for name, rule in gars.items():
  globals()[name] = rule
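As an illustration of the registration helper above, a plain averaging rule could be registered as follows; the exact signatures expected by make_gar are given in the module description (not included here), so the argument names and the check convention below are assumptions.

import torch

def average(gradients, f, **kwargs):
    # Coordinate-wise mean of the submitted gradients (not Byzantine-resilient)
    return torch.stack(gradients).mean(dim=0)

def check_average(gradients, f, **kwargs):
    # Assumed convention: return an error string on invalid parameters, None otherwise
    if not isinstance(gradients, (list, tuple)) or len(gradients) == 0:
        return "expected a non-empty list of gradients"
    return None

register("average", average, check_average)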
Example #16
    help=
    "Redirect the standard output to the given file (overwritten if exists), '-' for none, '-' by default"
)
parser.add_argument(
    "--stderr-to",
    type=str,
    default="-",
    help=
    "Redirect the standard error output to the given file (overwritten if exists), '-' for none, '-' by default"
)
parser.add_argument("--MPI",
                    action="store_true",
                    default=False,
                    help="Run with MPI instead of gRPC.")

with tools.Context("args", "info"):
    # Command line parsing
    args = parser.parse_args(sys.argv[1:])
    # Early redirection handling
    if args.stdout_to != "-":
        path = pathlib.Path(args.stdout_to)
        sys.stdout = tools.MethodCallReplicator(
            sys.stdout, tools.ContextIOWrapper(path.open("w"), nocolor=True))
        sys.stdout.write("Duplicating standard output to " +
                         repr(str(path.resolve())) + os.linesep)
    if args.stderr_to != "-":
        path = pathlib.Path(args.stderr_to)
        sys.stderr = tools.MethodCallReplicator(
            sys.stderr, tools.ContextIOWrapper(path.open("w"), nocolor=True))
        sys.stderr.write("Duplicating standard error output to " +
                         repr(str(path.resolve())) + os.linesep)
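tools.MethodCallReplicator and tools.ContextIOWrapper are project-specific; the underlying idea of duplicating every write to a second stream is the classic "tee" wrapper, sketched below (names are illustrative):

import sys

class Tee:
    # Forward write/flush calls to every wrapped stream
    def __init__(self, *streams):
        self.streams = streams
    def write(self, text):
        for stream in self.streams:
            stream.write(text)
    def flush(self):
        for stream in self.streams:
            stream.flush()

sys.stdout = Tee(sys.stdout, open("stdout.log", "w"))
print("Written to both the console and stdout.log")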
Example #17
 def build_and_load_one(path, deps=[]):
     """ Check if the given directory is a module to build and load, and if yes recursively build and load its dependencies before it.
 Args:
   path Given directory path
   deps Dependent module paths
 Returns:
   True on success, False on failure, None if not a module
 """
     nonlocal done_modules
     nonlocal fail_modules
     with tools.Context(path.name, "info"):
         ident = path.name[:3]
         if ident in ident_to_is_python.keys():
             # Is a module directory
             if len(path.name) <= 3 or path.name[3] == "_":
                 tools.warning("Skipped invalid module directory name " +
                               repr(path.name))
                 return None
             if not path.exists():
                 tools.warning("Unable to build and load " +
                               repr(str(path.name)) +
                               ": module does not exist")
                 fail_modules.append(path)  # Mark as failed
                 return False
             is_python_module = ident_to_is_python[ident]
             # Check if already built and loaded, or failed
             if path in done_modules:
                 if len(deps) == 0 and debug_mode:
                     tools.info("Already built and loaded " +
                                repr(str(path.name)))
                 return True
             if path in fail_modules:
                 if len(deps) == 0:
                     tools.warning("Was unable to build and load " +
                                   repr(str(path.name)))
                 return False
             # Check for dependency cycle (disallowed as they may mess with the linker)
             if path in deps:
                 tools.warning("Unable to build and load " +
                               repr(str(path.name)) +
                               ": dependency cycle found")
                 fail_modules.append(path)  # Mark as failed
                 return False
             # Build and load dependencies
             this_ldflags = list(extra_ldflags)
             depsfile = path / dependencies_file
             if depsfile.exists():
                 for modname in depsfile.read_text().splitlines():
                     res = build_and_load_one(base_directory / modname,
                                              deps + [path])
                     if res == False:  # Unable to build a dependency
                         if len(deps) == 0:
                             tools.warning("Unable to build and load " +
                                           repr(str(path.name)) +
                                           ": dependency " + repr(modname) +
                                           " build and load failed")
                         fail_modules.append(path)  # Mark as failed
                         return False
                     elif res == True:  # Module and its sub-dependencies was/were built and loaded successfully
                         this_ldflags.append("-Wl,--library=:" + str(
                             (base_directory / modname /
                              (modname + ".so")).resolve()))
             # List sources
             sources = []
             for subpath in path.iterdir():
                 if subpath.is_file() and ("").join(
                         subpath.suffixes) in source_suffixes:
                     sources.append(str(subpath))
             # Build and load this module
             try:
                 res = torch.utils.cpp_extension.load(
                     name=path.name,
                     sources=sources,
                     extra_cflags=extra_cflags,
                     extra_cuda_cflags=extra_cuda_cflags,
                     extra_ldflags=this_ldflags,
                     extra_include_paths=extra_include_paths,
                     build_directory=str(path),
                     verbose=debug_mode,
                     is_python_module=is_python_module)
                 if is_python_module:
                     glob[path.name[3:]] = res
             except Exception as err:
                 tools.warning("Unable to build and load " +
                               repr(str(path.name)) + ": " + str(err))
                 fail_modules.append(path)  # Mark as failed
                 return False
             done_modules.append(path)  # Mark as built and loaded
             return True
Example #18
def _build_and_load():
    """ Incrementally rebuild all libraries and bind all local modules in the global.
  """
    glob = globals()
    # Standard imports
    import os
    import pathlib
    import traceback
    import warnings
    # External imports
    import torch
    import torch.utils.cpp_extension
    # Internal imports
    import tools
    # Constants
    base_directory = pathlib.Path(__file__).parent.resolve()
    dependencies_file = ".deps"
    debug_mode_envname = "NATIVE_OPT"
    debug_mode_in_env = debug_mode_envname in os.environ
    if debug_mode_in_env:
        raw = os.environ[debug_mode_envname]
        value = raw.lower()
        if value in ["0", "n", "no", "false"]:
            debug_mode = True
        elif value in ["1", "y", "yes", "true"]:
            debug_mode = False
        else:
            tools.fatal(
                "%r defined in the environment, but with unexpected soft-boolean %r"
                % (debug_mode_envname, "%s=%s" % (debug_mode_envname, raw)))
    else:
        debug_mode = __debug__
    cpp_std_envname = "NATIVE_STD"
    cpp_std = os.environ.get(cpp_std_envname, "c++14")
    ident_to_is_python = {"so_": False, "py_": True}
    source_suffixes = {".cpp", ".cc", ".C", ".cxx", ".c++"}
    extra_cflags = ["-Wall", "-Wextra", "-Wfatal-errors", "-std=%s" % cpp_std]
    if torch.cuda.is_available():
        source_suffixes.update(
            set((".cu" + suffix) for suffix in source_suffixes))
        source_suffixes.add(".cu")
        extra_cflags.append("-DTORCH_CUDA_AVAILABLE")
    extra_cuda_cflags = [
        "-DTORCH_CUDA_AVAILABLE", "--expt-relaxed-constexpr",
        "-std=%s" % cpp_std
    ]
    extra_ldflags = ["-Wl,-L" + base_directory.root]
    extra_include_path = base_directory / "include"
    try:
        extra_include_paths = [str(extra_include_path.resolve())]
    except Exception:
        extra_include_paths = None
        warnings.warn("Not found include directory: " +
                      repr(str(extra_include_path)))
    # Print configuration information
    cpp_std_message = "Native modules compiled with %s standard; (re)define %r in the environment to compile with another standard" % (
        cpp_std, "%s=<standard>" % cpp_std_envname)
    if debug_mode:
        tools.warning(cpp_std_message)
        tools.warning(
            "Native modules compiled in debug mode; %sdefine %r in the environment or%s run python with -O/-OO options to compile in release mode"
            % ("re" if debug_mode_in_env else "", "%s=1" % debug_mode_envname,
               " undefine it and" if debug_mode_in_env else ""))
        extra_cflags += ["-O0", "-g"]
    else:
        quiet_envname = "NATIVE_QUIET"
        if quiet_envname not in os.environ:
            tools.trace(cpp_std_message)
            tools.trace(
                "Native modules compiled in release mode; %sdefine %r in the environment or%s run python without -O/-OO options to compile in debug mode"
                % ("re" if debug_mode_in_env else "",
                   "%s=0" % debug_mode_envname,
                   " undefine it and" if debug_mode_in_env else ""))
            tools.trace(
                "Define %r in the environment to hide these messages in release mode"
                % quiet_envname)
        extra_cflags += ["-O3", "-DNDEBUG"]
    # Variables
    done_modules = []
    fail_modules = []

    # Local procedures
    def build_and_load_one(path, deps=[]):
        """ Check if the given directory is a module to build and load, and if yes recursively build and load its dependencies before it.
    Args:
      path Given directory path
      deps Dependent module paths
    Returns:
      True on success, False on failure, None if not a module
    """
        nonlocal done_modules
        nonlocal fail_modules
        with tools.Context(path.name, "info"):
            ident = path.name[:3]
            if ident in ident_to_is_python.keys():
                # Is a module directory
                if len(path.name) <= 3 or path.name[3] == "_":
                    tools.warning("Skipped invalid module directory name " +
                                  repr(path.name))
                    return None
                if not path.exists():
                    tools.warning("Unable to build and load " +
                                  repr(str(path.name)) +
                                  ": module does not exist")
                    fail_modules.append(path)  # Mark as failed
                    return False
                is_python_module = ident_to_is_python[ident]
                # Check if already built and loaded, or failed
                if path in done_modules:
                    if len(deps) == 0 and debug_mode:
                        tools.info("Already built and loaded " +
                                   repr(str(path.name)))
                    return True
                if path in fail_modules:
                    if len(deps) == 0:
                        tools.warning("Was unable to build and load " +
                                      repr(str(path.name)))
                    return False
                # Check for dependency cycle (disallowed as they may mess with the linker)
                if path in deps:
                    tools.warning("Unable to build and load " +
                                  repr(str(path.name)) +
                                  ": dependency cycle found")
                    fail_modules.append(path)  # Mark as failed
                    return False
                # Build and load dependencies
                this_ldflags = list(extra_ldflags)
                depsfile = path / dependencies_file
                if depsfile.exists():
                    for modname in depsfile.read_text().splitlines():
                        res = build_and_load_one(base_directory / modname,
                                                 deps + [path])
                        if res == False:  # Unable to build a dependency
                            if len(deps) == 0:
                                tools.warning("Unable to build and load " +
                                              repr(str(path.name)) +
                                              ": dependency " + repr(modname) +
                                              " build and load failed")
                            fail_modules.append(path)  # Mark as failed
                            return False
                        elif res == True:  # Module and its sub-dependencies was/were built and loaded successfully
                            this_ldflags.append("-Wl,--library=:" + str(
                                (base_directory / modname /
                                 (modname + ".so")).resolve()))
                # List sources
                sources = []
                for subpath in path.iterdir():
                    if subpath.is_file() and ("").join(
                            subpath.suffixes) in source_suffixes:
                        sources.append(str(subpath))
                # Build and load this module
                try:
                    res = torch.utils.cpp_extension.load(
                        name=path.name,
                        sources=sources,
                        extra_cflags=extra_cflags,
                        extra_cuda_cflags=extra_cuda_cflags,
                        extra_ldflags=this_ldflags,
                        extra_include_paths=extra_include_paths,
                        build_directory=str(path),
                        verbose=debug_mode,
                        is_python_module=is_python_module)
                    if is_python_module:
                        glob[path.name[3:]] = res
                except Exception as err:
                    tools.warning("Unable to build and load " +
                                  repr(str(path.name)) + ": " + str(err))
                    fail_modules.append(path)  # Mark as failed
                    return False
                done_modules.append(path)  # Mark as built and loaded
                return True

    # Main loop
    for path in base_directory.iterdir():
        if path.is_dir():
            try:
                build_and_load_one(path)
            except Exception as err:
                tools.warning("Exception while processing " + repr(str(path)) +
                              ": " + str(err))
                with tools.Context("traceback", "trace"):
                    traceback.print_exc()
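The loader above reads a few environment variables; they can be set before importing the package, for example (values illustrative):

import os

os.environ["NATIVE_STD"] = "c++17"  # compile native modules with another C++ standard (default c++14)
os.environ["NATIVE_OPT"] = "1"      # force release-mode compilation ("0" forces debug mode)
os.environ["NATIVE_QUIET"] = "1"    # hide the informational messages printed in release mode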
Example #19
def _build_library(libpath, doneset, failset, headers, libstack=[], nocuda=False):
  """ (Re)build a library directory and its dependencies into their associated shared objects.
  Args:
    libpath  Library directory path
    doneset  Set of other, successfully built library directory paths to update
    failset  Set of other, not compiling library directory paths to update
    headers  List of shared header paths
    libstack Constant stack of dependent library directory paths
    nocuda   CUDA compiler was not found, don't try to compile these files
  Returns:
    Built library shared object path (None on failure)
  """
  with tools.Context(libpath.name, None):
    try:
      # Watch out for a dependency cycle
      libpath  = libpath.resolve()
      hascycle = libpath in libstack
      libstack += [libpath]
      if hascycle:
        raise RuntimeError("dependency cycle found")
      # List dependencies and sources (per category) to build
      depends = [] # Library directory paths to build (some may already be built/not compile)
      shareds = [] # Shared object paths this library depends on
      headers = list(headers) # Header paths (initially copy of common headers)
      srcscpu = [] # C++ source paths
      srcsgpu = [] # CUDA source paths
      libroot = libpath.parent
      for path in libpath.iterdir():
        try:
          path = path.resolve()
        except Exception:
          if path.is_symlink():
            raise RuntimeError("missing dependency " + repr(os.readlink(str(path))))
          continue # Else silently ignore file
        if path.is_dir():
          if path.parent != libroot: # Silently ignore directory
            continue
          if _build_check_ident(path): # Is a valid dependency
            depends.append(path)
        else:
          if path.parent != libpath: # Silently ignore file
            continue
          exts = path.suffixes
          if len(exts) > 0:
            if exts[-1] in _build_exts_hdr:
              headers.append(path)
              continue
            elif exts[-1] in _build_exts_cuda:
              srcsgpu.append(path)
              continue
            elif exts[-1] in _build_exts_src:
              if len(exts) > 1 and exts[-2] in _build_exts_cuda:
                srcsgpu.append(path)
              else:
                srcscpu.append(path)
              continue
            elif exts[-1] in _build_exts_obj:
              continue
          tools.trace("Ignoring file " + repr(path.name) + ": no/unrecognized extension")
      if nocuda: # No CUDA compiler => we ignore any CUDA source
        srcsgpu.clear()
      # Process dependencies first
      for path in depends:
        if path in failset:
          raise RuntimeError("dependency " + repr(path.name) + " could not be built")
        if path in doneset:
          so_path = _build_so_path(path)
        else:
          so_path = _build_library(path, doneset, failset, headers, libstack, nocuda=nocuda)
          if so_path is None:
            raise RuntimeError("dependency " + repr(path.name) + " could not be built")
        shareds.append(so_path)
      # Process sources second
      obj_paths = [] # Object paths to link
      for src_path in srcscpu:
        obj_path = pathlib.Path(str(src_path) + ".o")
        if _build_must_rebuild(obj_path, headers + [src_path]):
          if not _execute(_build_cpp_cmd(src_path, obj_path, len(srcsgpu) > 0)):
            raise RuntimeError("C++ source " + repr(src_path.name) + " did not compile")
        obj_paths.append(obj_path)
      for src_path in srcsgpu:
        obj_path = pathlib.Path(str(src_path) + ".o")
        if _build_must_rebuild(obj_path, headers + [src_path]):
          if not _execute(_build_cuda_cmd(src_path, obj_path)):
            raise RuntimeError("CUDA source " + repr(src_path.name) + " did not compile")
        obj_paths.append(obj_path)
      # (Re)link the shared object
      so_path = _build_so_path(libpath)
      if _build_must_rebuild(so_path, obj_paths):
        if not _execute(_build_link_cmd(obj_paths, shareds, so_path)):
          raise RuntimeError("final shared object " + repr(so_path.name) + " could not be linked")
      doneset.add(libpath)
      return so_path
    except Exception as err:
      tools.warning("Build failed: " + str(err))
      failset.add(libpath)
      return None
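_build_must_rebuild is used above but not shown; a typical modification-time check behind such a helper looks like this (an assumption, not necessarily the project's implementation):

def build_must_rebuild(target, dependencies):
    # Rebuild when the target is missing or older than any of its dependencies
    if not target.exists():
        return True
    target_mtime = target.stat().st_mtime
    return any(dep.stat().st_mtime > target_mtime for dep in dependencies)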
Example #20
                (name, message))
        # Attack
        res = unchecked(f_real=f_real, **kwargs)
        # Forward asserted return value
        assert isinstance(res, list) and len(
            res
        ) == f_real, "Expected attack %r to return a list of %d Byzantine gradients, got %r" % (
            name, f_real, res)
        return res

    # Select which function to call by default
    func = checked if __debug__ else unchecked
    # Bind all the (sub) functions to the selected function
    setattr(func, "check", check)
    setattr(func, "checked", checked)
    setattr(func, "unchecked", unchecked)
    # Export the selected function with the associated name
    attacks[name] = func


# Registered attacks (mapping name -> attack)
attacks = dict()

# Load native and all local modules
with tools.Context("attacks", None):
    tools.import_directory(pathlib.Path(__file__).parent, globals())

# Bind/overwrite the attack names with the associated attacks in globals()
for name, attack in attacks.items():
    globals()[name] = attack
Example #21
        default=0,
        help=
        "How many training steps between experiment checkpointing, 0 or leave '--result-directory' empty for no checkpointing"
    )
    parser.add_argument(
        "--user-input-delta",
        type=int,
        default=0,
        help=
        "How many training steps between two prompts for user command inputs, 0 for no user input"
    )
    # Parse command line
    return parser.parse_args(sys.argv[1:])


with tools.Context("cmdline", "info"):
    args = process_commandline()
    # Parse additional arguments
    for name in ("init_multi", "init_mono", "gar", "attack", "model", "loss",
                 "criterion"):
        name = f"{name}_args"
        keyval = getattr(args, name)
        setattr(args, name,
                dict() if keyval is None else tools.parse_keyval(keyval))
    # Count the number of real honest workers
    args.nb_honests = args.nb_workers - args.nb_real_byz
    if args.nb_honests < 0:
        tools.fatal(
            f"Invalid arguments: there are more real Byzantine workers ({args.nb_real_byz}) than total workers ({args.nb_workers})"
        )
    # Check the learning rate and associated options
Example #22
  parser.add_argument("--result-directory",
    type=str,
    default=None,
    help="Path of the directory in which to save the experiment results (loss, cross-accuracy, ...) and checkpoints, empty for no saving")
  parser.add_argument("--evaluation-delta",
    type=int,
    default=100,
    help="How many training steps between model evaluations, 0 for no evaluation")
  parser.add_argument("--user-input-delta",
    type=int,
    default=0,
    help="How many training steps between two prompts for user command inputs, 0 for no user input")
  # Parse command line
  return parser.parse_args(sys.argv[1:])

with tools.Context("cmdline", "info"):
  args = process_commandline()
  # Parse additional arguments
  for name in ("gar", "attack", "model", "dataset", "loss", "criterion"):
    name = f"{name}_args"
    keyval = getattr(args, name)
    setattr(args, name, dict() if keyval is None else tools.parse_keyval(keyval))
  # Count the number of real honest workers
  args.nb_honests = args.nb_workers - args.nb_real_byz
  if args.nb_honests < 0:
    tools.fatal(f"Invalid arguments: there are more real Byzantine workers ({args.nb_real_byz}) than total workers ({args.nb_workers})")
  # Check general training parameters
  if args.momentum < 0.:
    tools.fatal(f"Invalid arguments: negative momentum factor {args.momentum}")
  if args.dampening < 0.:
    tools.fatal(f"Invalid arguments: negative dampening factor {args.dampening}")
Example #23
def interactive(glbs=None, lcls=None, prompt=">>> ", cprmpt="... "):
  """ Switch to a simple interactive prompt, execute CTRL+D (or equivalent) to leave.
  Args:
    glbs   Globals dictionary to use, None to use caller's globals
    lcls   Locals dictionary to use, None to use given globals or caller's locals/globals
    prompt Command prompt to display
    cprmpt Command prompt to display when continuing a line
  """
  # Recover caller's globals and locals
  try:
    caller = sys._getframe().f_back
  except Exception:
    caller = None
    if glbs is None:
      tools.warning("Unable to recover caller's frame, locals and globals", context="interactive")
  if glbs is None:
    if caller is not None and hasattr(caller, "f_globals"):
      glbs = caller.f_globals
    else:
      glbs = dict()
  if lcls is None:
    if caller is not None and hasattr(caller, "f_locals"):
      lcls = caller.f_locals
    else:
      lcls = glbs
  # Command input and execution
  command   = ""
  statement = False
  while True:
    print(prompt if len(command) == 0 else cprmpt, end="", flush=True)
    try:
      # Input new line
      try:
        line = input()
        print("\033[A") # Trick to "advertise" new line on stdout after new line on stdin
      except BaseException as err:
        if any(isinstance(err, cls) for cls in (EOFError, KeyboardInterrupt)):
          print() # Since no new line was printed by pressing ENTER
        return
      # Handle expression
      if not statement:
        try:
          res = eval(line, glbs, lcls)
          if res is not None:
            print(res)
        except SyntaxError: # Heuristic that we are dealing with a statement
          statement = True
      # Handle single or multi-line statement(s)
      if statement:
        if len(command) == 0: # Just went through trying an expression
          command = line
          try:
            exec(command, glbs, lcls)
          except SyntaxError: # Heuristic that we are dealing with a multi-line statement
            continue
        elif len(line) > 0:
          command += os.linesep + line
          continue
        else: # Multi-line statement is complete
          exec(command, glbs, lcls)
    except Exception:
      with tools.Context("uncaught", "error"):
        traceback.print_exc()
    command = ""
    statement = False
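Typical use is to call it from anywhere in a script to inspect the current state (assuming the function is importable from the surrounding module):

x = 42
interactive()  # type 'x' at the '>>> ' prompt; press CTRL+D (EOF) to resume execution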
Example #24
        default="auto",
        help=
        "Comma-separated list of devices on which to run the experiments, used in a round-robin fashion"
    )
    parser.add_argument(
        "--supercharge",
        type=int,
        default=1,
        help=
        "How many experiments are run in parallel per device, must be positive"
    )
    # Parse command line
    return parser.parse_args(sys.argv[1:])


with tools.Context("cmdline", "info"):
    args = process_commandline()
    # Check the "supercharge" parameter
    if args.supercharge < 1:
        tools.fatal("Expected a positive supercharge value, got %d" %
                    args.supercharge)
    # Make the result directories
    def check_make_dir(path):
        path = pathlib.Path(path)
        if path.exists():
            if not path.is_dir():
                tools.fatal("Given path %r must point to a directory" %
                            (str(path), ))
        else:
            path.mkdir(mode=0o755, parents=True)
        return path
Example #25
    nocuda = True
    for path in os.environ["PATH"].split(os.pathsep):
      if (pathlib.Path(path) / _build_cudabin).exists():
        nocuda = False
        break
    # List all common headers
    headers = []
    if _build_include is not None:
      for path in _build_include.iterdir():
        if path.suffix in _build_exts_hdr:
          headers.append(path)
    # Compile libraries and load OP
    doneset = set()
    failset = set()
    for dirpath in pathlib.Path(__file__).resolve().parent.iterdir():
      ident = dirpath.name[:3]
      if dirpath.is_dir() and ident in _loader_hooks.keys(): # Is a library directory
        if dirpath not in doneset and dirpath not in failset:
          so_path = _build_library(dirpath, doneset, failset, headers, nocuda=nocuda)
          loader  = _loader_hooks[ident]
          if so_path is not None and loader is not None: # Successful build and loader needed
            loader(so_path)
  except Exception as err:
    with tools.Context(ident, "warning"):
      print("Loading failed while compiling " + repr(ident) + ": " + str(err))
      with tools.Context("traceback", "trace"):
        traceback.print_exc()

with tools.Context("native", None):
  _loader()
Example #26
        default="auto",
        help=
        "Comma-separated list of devices on which to run the experiments, used in a round-robin fashion"
    )
    parser.add_argument(
        "--supercharge",
        type=int,
        default=1,
        help=
        "How many experiments are run in parallel per device, must be positive"
    )
    # Parse command line
    return parser.parse_args(sys.argv[1:])


with tools.Context("cmdline", "info"):
    args = process_commandline()
    # Check the "supercharge" parameter
    if args.supercharge < 1:
        tools.fatal(
            f"Expected a positive supercharge value, got {args.supercharge}")
    # Make the result directories
    def check_make_dir(path):
        path = pathlib.Path(path)
        if path.exists():
            if not path.is_dir():
                tools.fatal(
                    f"Given path {str(path)!r} must point to a directory")
        else:
            path.mkdir(mode=0o755, parents=True)
        return path