Example no. 1
 def __init__(self, dataset, model, args):
     # Parse key:val arguments
     nbcores = len(os.sched_getaffinity(0))
     if nbcores == 0:
         nbcores = 4  # Arbitrary fallback
     args = tools.parse_keyval(args,
                               defaults={
                                   "batch-size": 32,
                                   "eval-batch-size": 1024,
                                   "weight-decay": 0.00004,
                                   "label-smoothing": 0.,
                                   "labels-offset": 0,
                                   "nb-fetcher-threads": nbcores,
                                   "nb-batcher-threads": nbcores
                               })
     if args["batch-size"] <= 0:
         raise tools.UserException(
             "Cannot make batches of non-positive size")
     # Report experiments
     with tools.Context("slim", None):
         print("Dataset name in use:   " + repr(dataset[0]) + " (in " +
               repr(dataset[1]) + ")")
         print("Dataset preprocessing: " +
               (repr(args["preprocessing"]) if "preprocessing" in
                args else "<model default>"))
         print("Model name in use:     " + repr(model))
     # Finalization
     self.__args = args
     self.__dataset = dataset
     self.__preproc = args.get("preprocessing", model)
     self.__model = model
     self.__cntr_wk = 0  # Worker instantiation counter
     self.__cntr_ev = 0  # Evaluator instantiation counter
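
All of these examples funnel their "key:value" command-line arguments through tools.parse_keyval. Its source is not shown on this page; the following is only a minimal sketch of the behavior the call sites above rely on (defaults merged in, each value cast to the type of the matching default), not the actual implementation:

def parse_keyval(args, defaults=None):
  """ Sketch: parse a list of "key:value" strings into a dict over 'defaults'. """
  parsed = dict() if defaults is None else dict(defaults)
  for arg in args:
    key, sep, value = arg.partition(":")
    if len(sep) == 0:
      raise ValueError("Expected a \"key:value\" argument, got " + repr(arg))
    default = parsed.get(key)
    # Cast to the default's type when one exists; otherwise keep the raw string
    parsed[key] = value if default is None else type(default)(value)
  return parsed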
Example no. 2
def build(struct, name, select, args, **kwargs):
    """ Call the constructor associated with the given selection and the given keyword + parsed arguments.
  Args:
    struct Structure defining constructors and their respective arguments
    name   Name of what is built by the constructor
    select Constructor to select
    args   List of "key:value" command line arguments
    ...    Key-value arguments forwarded to the constructor
  """
    # Recover constructor and argument structure
    if select not in struct:
        raise tools.UserException(
            "Unknown " + name + " " + repr(select) + ", " +
            ("no " + name + " available" if len(struct) == 0 else
             "expected one of: '" + "', '".join(struct.keys()) + "'"))
    construct, args_struct = struct[select]
    # Translate parameters
    defaults = {key: val[0] for key, val in args_struct.items()}
    args_parsed = tools.parse_keyval(args, defaults=defaults)
    # Instantiate and return
    args_kw = {}
    for key, val in args_struct.items():
        # Iterating over 'args_struct' (not 'args_parsed') ignores supplementary parameters
        args_kw[val[1]] = args_parsed[key]
    return construct(**args_kw, **kwargs)
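
To make the expected shape of 'struct' concrete, here is a hypothetical registry and call; the class Average and its "beta" parameter are invented for illustration and do not come from the source:

class Average:
  def __init__(self, beta):
    self.beta = beta

# Each selection maps to (constructor, {cli-key: (default value, constructor keyword)})
struct = {"average": (Average, {"beta": (1., "beta")})}

gar = build(struct, "GAR", "average", ["beta:0.5"])  # Calls Average(beta=0.5)
# Supplementary "key:value" arguments would be parsed but not forwarded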
Example no. 3
 def __init__(self, nbworkers, nbbyzwrks, args):
     # Parse key:val arguments
     ps = tools.parse_keyval([] if args is None else args,
                             defaults={"ps": 0.9})["ps"]
     if ps <= 0 or ps > 1:
         raise tools.UserException("Invalid selection probability, got %s" %
                                   (ps, ))
     # Finalization
     self._p = ps
     self._f = nbbyzwrks
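
The class enclosing this __init__ is not part of the excerpt; assuming it is some subsampling-based GAR wrapper (the name SubsampledGAR below is a placeholder), instantiation would look like:

gar = SubsampledGAR(nbworkers=10, nbbyzwrks=2, args=["ps:0.8"])
# gar._p == 0.8 (the string "0.8" is cast to float via the 0.9 default), gar._f == 2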
Example no. 4
 def __init__(self, args):
   # Parse key:val arguments
   args = tools.parse_keyval(args, defaults={"batch-size": 32})
   if args["batch-size"] <= 0:
     raise tools.UserException("Cannot make batches of non-positive size")
   # Report loading
   with tools.Context("mnist", None):
     print("Loading MNIST dataset...")
     raw_data = tf.keras.datasets.mnist.load_data()
   # Finalization
   self.__args     = args
   self.__raw_data = raw_data
   self.__datasets = None
   self.__cntr_wk  = 0 # Worker instantiation counter
   self.__cntr_ev  = 0 # Evaluator instantiation counter
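
For reference, tf.keras.datasets.mnist.load_data() returns two (images, labels) tuples of NumPy arrays, so the raw_data stored above unpacks as:

(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
# x_train: uint8 array of shape (60000, 28, 28); y_train: uint8, shape (60000,)
# x_test:  uint8 array of shape (10000, 28, 28); y_test:  uint8, shape (10000,)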
Example no. 5
 def __init__(self, args):
     # Parse key:val arguments
     nbcores = len(os.sched_getaffinity(0))
     if nbcores == 0:
         nbcores = 4  # Arbitrary fallback
     args = tools.parse_keyval(args,
                               defaults={
                                   "batch-size": 32,
                                   "eval-batch-size": 1024,
                                   "nb-fetcher-threads": nbcores,
                                   "nb-batcher-threads": nbcores
                               })
     if args["batch-size"] <= 0:
         raise tools.UserException(
             "Cannot make batches of non-positive size")
     # Finalization
     self.__args = args
     self.__preproc = args.get("preprocessing", "cifarnet")
     self.__cntr_wk = 0  # Worker instantiation counter
     self.__cntr_ev = 0  # Evaluator instantiation counter
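
One caveat on the core-count probe used here and in Example no. 1: os.sched_getaffinity only exists on Linux, so this snippet raises AttributeError elsewhere. A portable variant (a suggestion, not what the source does) would be:

try:
  nbcores = len(os.sched_getaffinity(0))  # CPUs this process may run on (Linux-only)
except AttributeError:
  nbcores = os.cpu_count() or 4  # Portable fallback; os.cpu_count() may return None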
Example no. 6
  parser.add_argument("--evaluation-delta",
    type=int,
    default=100,
    help="How many training steps between model evaluations, 0 for no evaluation")
  parser.add_argument("--user-input-delta",
    type=int,
    default=0,
    help="How many training steps between two prompts for user command inputs, 0 for no user input")
  # Parse command line
  return parser.parse_args(sys.argv[1:])

with tools.Context("cmdline", "info"):
  args = process_commandline()
  # Parse additional arguments
  for name in ("gar", "attack", "model", "dataset", "loss", "criterion"):
    name = f"{name}_args"
    keyval = getattr(args, name)
    setattr(args, name, dict() if keyval is None else tools.parse_keyval(keyval))
  # Count the number of real honest workers
  args.nb_honests = args.nb_workers - args.nb_real_byz
  if args.nb_honests < 0:
    tools.fatal(f"Invalid arguments: there are more real Byzantine workers ({args.nb_real_byz}) than total workers ({args.nb_workers})")
  # Check general training parameters
  if args.momentum < 0.:
    tools.fatal(f"Invalid arguments: negative momentum factor {args.momentum}")
  if args.dampening < 0.:
    tools.fatal(f"Invalid arguments: negative dampening factor {args.dampening}")
  if args.weight_decay < 0.:
    tools.fatal(f"Invalid arguments: negative weight decay factor {args.weight_decay}")
  # Check the learning rate and associated options
  if args.learning_rate <= 0:
    tools.fatal(f"Invalid arguments: non-positive learning rate {args.learning_rate}")
  if args.learning_rate_decay < 0:
    tools.fatal(f"Invalid arguments: negative learning rate decay {args.learning_rate_decay}")
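
Tying this together, a hypothetical command line for this script (the script name and flag spellings are assumptions inferred from the argparse destinations above):

# python train.py --nb-workers 10 --nb-real-byz 2 --momentum 0.9 --gar-args ps:0.9
# After the parsing loop above, args.gar_args == {"ps": "0.9"}: with no 'defaults'
# mapping given to tools.parse_keyval, there is no type to cast to, so the
# values presumably remain strings.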
Example no. 7
    parser.add_argument(
        "--user-input-delta",
        type=int,
        default=0,
        help="How many training steps between two prompts for user command inputs, 0 for no user input"
    )
    # Parse command line
    return parser.parse_args(sys.argv[1:])


with tools.Context("cmdline", "info"):
    args = process_commandline()
    # Parse additional arguments
    for name in ("init_multi", "init_mono", "gar", "attack", "model", "loss",
                 "criterion"):
        name = f"{name}_args"
        keyval = getattr(args, name)
        setattr(args, name,
                dict() if keyval is None else tools.parse_keyval(keyval))
    # Count the number of real honest workers
    args.nb_honests = args.nb_workers - args.nb_real_byz
    if args.nb_honests < 0:
        tools.fatal(
            f"Invalid arguments: there are more real Byzantine workers ({args.nb_real_byz}) than total workers ({args.nb_workers})"
        )
    # Check the learning rate and associated options
    if args.learning_rate_schedule is None:
        if args.learning_rate <= 0:
            tools.fatal(
                f"Invalid arguments: non-positive learning rate {args.learning_rate}"
            )
        if args.learning_rate_decay < 0:
            tools.fatal(
                f"Invalid arguments: negative learning rate decay {args.learning_rate_decay}"