def main(config, experiments, num_cpus, num_gpus, redis_address):
    print("config =", config.name)
    print("experiments =", experiments)
    print("num_gpus =", num_gpus)
    print("num_cpus =", num_cpus)
    print("redis_address =", redis_address)

    # Use configuration file location as the project location.
    project_dir = os.path.dirname(config.name)
    project_dir = os.path.abspath(project_dir)
    print("project_dir =", project_dir)

    # Load and parse experiment configurations
    configs = parse_config(config, experiments, globals_param=globals())

    # Pre-download dataset
    data_dir = os.path.join(project_dir, "data")
    datasets.CIFAR10(data_dir, download=True, train=True)

    # Initialize ray cluster
    if redis_address is not None:
        ray.init(redis_address=redis_address, include_webui=True)
        num_cpus = 1
    else:
        ray.init(num_cpus=num_cpus,
                 num_gpus=num_gpus,
                 local_mode=num_cpus == 1)

    # Run all experiments in parallel
    results = []
    for exp in configs:
        config = configs[exp]
        config["name"] = exp

        # Make sure local directories are relative to the project location
        path = config.get("path", None)
        if path and not os.path.isabs(path):
            config["path"] = os.path.join(project_dir, path)

        data_dir = config.get("data_dir", "data")
        if not os.path.isabs(data_dir):
            config["data_dir"] = os.path.join(project_dir, data_dir)

        # When running multiple hyperparameter searches on different experiments,
        # ray.tune will run one experiment at a time. We use "ray.remote" to run
        # each tune experiment in parallel as a remote function and wait until
        # all experiments complete.
        results.append(
            run_experiment.remote(config,
                                  NotSoDenseTune,
                                  num_cpus=num_cpus,
                                  num_gpus=num_gpus / num_cpus))

    # Wait for all experiments to complete
    ray.get(results)

    ray.shutdown()
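
The snippet above relies on a repo-specific helper, run_experiment, that is not shown on this page. A minimal sketch of how such a helper could be written as a Ray remote wrapper around ray.tune is given below; the function body and the resources_per_trial mapping are assumptions for illustration, not the project's actual implementation.

import ray
from ray import tune


@ray.remote
def run_experiment(config, trainable, num_cpus=1, num_gpus=0):
    # Sketch only: each remote call launches its own tune run, so several
    # hyperparameter searches can execute concurrently instead of one at a time.
    return tune.run(
        trainable,
        name=config["name"],
        config=config,
        resources_per_trial={"cpu": num_cpus, "gpu": num_gpus})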
def main(config, experiments, num_cpus, num_gpus, redis_address):
  print("config =", config.name)
  print("experiments =", experiments)
  print("num_gpus =", num_gpus)
  print("num_cpus =", num_cpus)
  print("redis_address =", redis_address)

  # Use configuration file location as the project location.
  projectDir = os.path.dirname(config.name)
  projectDir = os.path.abspath(projectDir)
  print("projectDir =", projectDir)

  # Load and parse experiment configurations
  configs = parse_config(config, experiments, globals=globals())

  # Pre-download dataset
  data_dir = os.path.join(projectDir, "data")
  datasets.CIFAR10(data_dir, download=True, train=True)

  # Initialize ray cluster
  if redis_address is not None:
    ray.init(redis_address=redis_address, include_webui=True)
    num_cpus = 1
  else:
    ray.init(num_cpus=num_cpus, num_gpus=num_gpus, local_mode=num_cpus == 1)

  # Run all experiments in parallel
  results = []
  for exp in configs:
    config = configs[exp]
    config["name"] = exp

    # Make sure local directories are relative to the project location
    path = config.get("path", None)
    if path and not os.path.isabs(path):
      config["path"] = os.path.join(projectDir, path)

    data_dir = config.get("data_dir", "data")
    if not os.path.isabs(data_dir):
      config["data_dir"] = os.path.join(projectDir, data_dir)

    # When running multiple hyperparameter searches on different experiments,
    # ray.tune will run one experiment at a time. We use "ray.remote" to run
    # each tune experiment in parallel as a remote function and wait until
    # all experiments complete.
    results.append(run_experiment.remote(config, MobileNetTune,
                                         num_cpus=1,
                                         num_gpus=num_gpus / num_cpus))

  # Wait for all experiments to complete
  ray.get(results)

  ray.shutdown()
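
In both snippets, parse_config receives the open configuration file plus a list of experiment names and returns a dict keyed by experiment name. The file format itself is not shown here; assuming an INI-style .cfg file (later snippets open a .cfg), a configuration with two experiments might look roughly like the sketch below. All section and key names are illustrative only, except "path" and "data_dir", which the snippets above read from each experiment's config.

[DEFAULT]
path = results
data_dir = data

[base_experiment]
iterations = 30
learning_rate = 0.01

[wide_experiment]
iterations = 30
learning_rate = 0.05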
Example #3
def main(config, experiment, show_list):
    configs = parse_config(config, experiment, globals_param=globals())
    if show_list:
        print("Experiments:", list(configs.keys()))
        return

    project_dir = os.path.dirname(config.name)
    project_dir = os.path.abspath(project_dir)

    # Load dataset
    data_dir = os.path.join(project_dir, "data")
    datasets.CIFAR10(data_dir, download=True, train=True)

    train_models(configs, project_dir=project_dir)
def main(config, experiment, show_list):
  configs = parse_config(config, experiment, globals=globals())
  if show_list:
    print("Experiments:", list(configs.keys()))
    return

  projectDir = os.path.dirname(config.name)
  projectDir = os.path.abspath(projectDir)

  # Load dataset
  data_dir = os.path.join(projectDir, "data")
  datasets.CIFAR10(data_dir, download=True, train=True)

  trainModels(configs, projectDir=projectDir)
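
In both variants, config is an open file object (config.name gives its path), which suggests the command line parser hands main() a file rather than a path string. A hypothetical wiring with argparse.FileType is sketched below; the option names, defaults, and the show-list flag are assumptions.

import argparse


def parse_options():
    # Hypothetical CLI: pass the configuration as an open file so that
    # main() can use config.name as the project location.
    optparser = argparse.ArgumentParser()
    optparser.add_argument("-c", "--config", type=argparse.FileType("r"),
                           default="experiments.cfg")
    optparser.add_argument("-e", "--experiment", action="append",
                           dest="experiments",
                           help="run only selected experiments")
    optparser.add_argument("-l", "--list", action="store_true",
                           dest="show_list",
                           help="list available experiments and exit")
    return optparser.parse_args()

Calling main(options.config, options.experiments, options.show_list) would then line up with the signature used in Example #3.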
Example #5
                         help="run only selected experiments, by default run "
                              "all experiments in config file.")

  return optparser.parse_args()



if __name__ == "__main__":

  print("Using torch version", torch.__version__)
  print("Torch device count=", torch.cuda.device_count())
  # Load and parse command line options and experiment configurations
  options = parse_options()
  if options.config != "":
    with open(options.config) as f:
      configs = parse_config(f, options.experiments)
    projectDir = os.path.dirname(options.config)

  elif options.params != "":
    with open(options.params) as f:
      params = json.load(f)
      params["data_dir"] = os.path.abspath(os.path.join(".", "data"))
      params["path"] = os.path.abspath(os.path.dirname(options.params))
      configs = {params["name"]: params}
    projectDir = "."

  else:
    raise RuntimeError("Either a .cfg or a params .json file must be specified")

  # Use configuration file location as the project location.
  projectDir = os.path.abspath(projectDir)
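
When a params .json file is given instead of a .cfg, the snippet only requires a "name" key; "data_dir" and "path" are overwritten relative to the current directory and the file's location. A minimal, purely illustrative params file could be:

{
  "name": "my_experiment",
  "iterations": 30,
  "learning_rate": 0.01
}

Only "name" is read directly by this snippet; the remaining keys stand in for whatever hyperparameters the experiment expects.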
Example #6
                           dest="num_images",
                           default=1,
                           help="The number of images to test")
    optparser.add_argument(
        "-e",
        "--experiment",
        action="append",
        dest="experiments",
        help="run only selected experiments, by default run "
        "all experiments in config file.")

    return optparser.parse_args()


if __name__ == "__main__":

    print("Torch device count=", torch.cuda.device_count())
    # Load and parse command line options and experiment configurations
    options = parse_options()
    configs = parse_config(options.config, options.experiments)

    # Use configuration file location as the project location.
    projectDir = os.path.dirname(options.config.name)
    projectDir = os.path.abspath(projectDir)

    for exp in configs:
        config = configs[exp]
        config["name"] = exp

        testModel(config, options, projectDir=projectDir)
                           dest="plot_gradients",
                           help="Plot gradients for debugging",
                           default=False)
    optparser.add_argument("-v",
                           "--verbose",
                           dest="verbose",
                           help="Verbosity",
                           default=0)

    return optparser.parse_args()


if __name__ == "__main__":
    # Load and parse command line options and experiment configurations
    options = parse_options()
    configs = parse_config(options.config, options.experiments, globals(),
                           locals())

    # Use configuration file location as the project location.
    # Ray Tune default working directory is "~/ray_results"
    project_dir = os.path.dirname(options.config.name)
    project_dir = os.path.abspath(project_dir)

    print("Using torch version", torch.__version__)
    print("Torch device count=", torch.cuda.device_count())

    # Initialize ray cluster
    if "REDIS_ADDRESS" in os.environ:
        ray.init(redis_address=os.environ["REDIS_ADDRESS"], include_webui=True)
    else:
        # Initialize ray cluster
        ray.init(
        help="run only selected experiments, by default run "
        "all experiments in config file.",
    )

    return optparser.parse_args()


if __name__ == "__main__":

    print("Using torch version", torch.__version__)
    print("Torch device count=", torch.cuda.device_count())
    # Load and parse command line options and experiment configurations
    options = parse_options()
    if options.config != "":
        with open(options.config) as f:
            configs = parse_config(f, options.experiments)
        project_dir = os.path.dirname(options.config)

    elif options.params != "":
        with open(options.params) as f:
            params = json.load(f)
            params["data_dir"] = os.path.abspath(os.path.join(".", "data"))
            params["path"] = os.path.abspath(os.path.dirname(options.params))
            configs = {params["name"]: params}
        project_dir = "."

    else:
        raise RuntimeError("Either a .cfg or a params .json file must be specified")

    # Use configuration file location as the project location.
    project_dir = os.path.abspath(project_dir)