Example #1
0
def main(
    solvers_path,
    tasks_root,
    budget,
    discard = False,
    runs = 4,
    only_solver = None,
    suffix = ".runs.csv",
    workers = 0,
    ):
    """Collect solver running-time data.

    Runs every solver in the bundle (or only ``only_solver``) on every task
    found under ``tasks_root``, ``runs`` times each, and appends one CSV row
    per run to a file beside each task (task path + ``suffix``).
    """

    cargo.enable_default_logging()

    def yield_runs():
        # build one work item per (repeat, solver, task) combination
        bundle = borg.load_solvers(solvers_path)
        paths = list(cargo.files_under(tasks_root, bundle.domain.extensions))

        if not paths:
            raise ValueError("no paths found under specified root")

        if only_solver is None:
            solver_names = bundle.solvers.keys()
        else:
            if only_solver not in bundle.solvers:
                # was `ArgumentError`, an undefined name here: raising it
                # would itself fail with NameError; ValueError matches the
                # "no paths found" check above
                raise ValueError("no such solver")

            solver_names = [only_solver]

        for _ in xrange(runs):
            for solver_name in solver_names:
                for path in paths:
                    yield (run_solver_on, [solvers_path, solver_name, path, budget])

    def collect_run(task, row):
        # persist one run outcome unless results are being discarded
        if not discard:
            # unpack run outcome (`budget` here intentionally shadows the
            # outer parameter with the per-run budget from the row)
            (solver_name, budget, cost, succeeded, answer) = row

            if answer is None:
                answer_text = None
            else:
                # answers are arbitrary objects; store pickled + compressed
                answer_text = base64.b64encode(zlib.compress(pickle.dumps(answer)))

            # append to the per-task CSV, writing a header only once
            (_, _, cnf_path, _) = task.args
            csv_path = cnf_path + suffix
            existed = os.path.exists(csv_path)

            with open(csv_path, "a") as csv_file:
                writer = csv.writer(csv_file)

                if not existed:
                    writer.writerow(["solver", "budget", "cost", "succeeded", "answer"])

                writer.writerow([solver_name, budget, cost, succeeded, answer_text])

    cargo.do_or_distribute(yield_runs(), workers, collect_run)
Example #2
0
def main(local_path, train_path):
    """Compute the machine speed calibration ratio."""

    cargo.enable_default_logging()

    # median solver cost on this machine versus on the training machine
    (local_median, train_median) = [
        read_median_cost(p) for p in [local_path, train_path]
        ]

    logger.info("local median run time is %.2f CPU seconds", local_median)
    logger.info("model median run time is %.2f CPU seconds", train_median)
    logger.info("local speed ratio is thus %f", local_median / train_median)
Example #3
0
def main(path,
         executable,
         no_tmux=False,
         timeout=250,
         window=None,
         *arguments):
    """
    Run something in response to changes in a directory.

    :param path: directory tree to watch (recursively)
    :param executable: program to run when the tree changes
    :param no_tmux: if true, skip tmux integration entirely
    :param timeout: coalescing timeout for follow-up events; None disables
        coalescing (NOTE(review): presumably milliseconds, per pyinotify's
        check_events — confirm)
    :param window: tmux window identifier handed to execute_command
    :param arguments: extra command-line arguments appended to executable
    """

    # enable logging
    cargo.enable_default_logging()

    # prepare the notification framework
    command = [executable] + list(arguments)
    manager = pyinotify.WatchManager()
    handler = TriggerHandler()
    notifier = pyinotify.Notifier(manager, handler)

    # watch the tree for creations, deletions, and modifications
    manager.add_watch(
        path,
        pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY,
        rec=True,
    )

    # watch for and respond to events
    try:
        while True:
            triggered = notifier.check_events()

            if triggered:
                # coalesce events: drain the initial batch, then keep
                # draining until no further event arrives within `timeout`
                notifier.read_events()
                notifier.process_events()

                if timeout is not None:
                    while notifier.check_events(timeout=timeout):
                        notifier.read_events()
                        notifier.process_events()

                # run the command (inside tmux unless disabled)
                execute_command(command, not no_tmux, window)
    finally:
        # restore the tmux window status color on the way out
        if not no_tmux:
            subprocess.call([
                "tmux", "-q", "setw", "-t",
                str(window), "window-status-bg", "default"
            ])
Example #4
0
def main(out_path, setup_path):
    """Prepare to visualize run data."""

    cargo.enable_default_logging()

    # parse the JSON experiment configuration
    with open(setup_path) as config_file:
        configuration = json.load(config_file)

    # paths inside the setup are resolved relative to its directory
    view_data = ViewData(os.path.dirname(setup_path), configuration)

    # serialize the assembled view data
    logger.info("writing visualization data to %s", out_path)

    with open(out_path, "w") as pickle_file:
        pickle.dump(view_data, pickle_file)
Example #5
0
def main(root_path, encoding_name, min_width, max_width, min_height, max_height):
    """Encode a grid-coloring problem in CNF."""

    cargo.enable_default_logging()

    # enumerate every (width, height) pair in the requested ranges, inclusive
    widths = xrange(min_width, max_width + 1)
    heights = xrange(min_height, max_height + 1)

    for width in widths:
        for height in heights:
            # build the CNF encoding of this grid instance
            encoder = gridc.encoding(encoding_name)
            instance = encoder(gridc.Grid(width, height)).encode()

            # emit it as <width>x<height>.<encoding>.cnf under the root
            file_name = "{0}x{1}.{2}.cnf".format(width, height, encoding_name)

            with open(os.path.join(root_path, file_name), "w") as cnf_file:
                instance.write(cnf_file)
Example #6
0
def main(out_path, fit_path):
    """Visualize model parameters.

    Builds a static HTML/JS explorer under ``out_path`` from the pickled
    fit data at ``fit_path``.
    """

    cargo.enable_default_logging()

    # copy over static content
    static_path = os.path.join(borg_explorer.__path__[0], "static")

    distutils.dir_util.copy_tree(static_path, out_path)

    # load the model(s)
    # NOTE(review): binary pickles would need mode "rb"; presumably this
    # file was written in text mode by the matching dump — confirm
    logger.info("loading visualization data from %s", fit_path)

    with open(fit_path) as fit_file:
        fit = pickle.load(fit_file)

    # write data directories
    logger.info("writing inputs archive")

    # the archive bytes were prepared elsewhere; dump them verbatim
    with open(os.path.join(out_path, "inputs.tar.gz"), "w") as archive_file:
        archive_file.write(fit.data_archive)

    # one output subdirectory per fitted category
    for (name, category) in fit.categories.items():
        write_category(out_path, name, category)

    # generate the visualization
    loader = jinja2.PackageLoader("borg_explorer", "templates")
    environment = jinja2.Environment(loader = loader)

    def write_rendered(template_name, output_name, **kwargs):
        """Render one template into the output directory as UTF-8."""

        template = environment.get_template(template_name)

        with open(os.path.join(out_path, output_name), "w") as output_file:
            output_file.write(template.render(**kwargs).encode("utf-8"))

    write_rendered("index.html", "index.html", base_url = fit.setup["base_url"])
    write_rendered("borgview.js", "borgview.js", base_url = fit.setup["base_url"])
    write_rendered("borgview.css", "borgview.css")
    write_rendered("analytics.js", "analytics.js")

    # index of categories so the front end can locate each data directory
    with open(os.path.join(out_path, "categories.json"), "w") as output_file:
        category_list = [{"name": k, "path": sanitize(k)} for k in fit.categories.keys()]

        json.dump(category_list, output_file)
Example #7
0
def main(domain_name, tasks_root, workers=0):
    """Collect task features.

    Computes the feature vector of every task found under ``tasks_root``
    and writes it beside each task as a two-row CSV (names, then values).
    """

    cargo.enable_default_logging()

    def yield_runs():
        # one work item per task file recognized by the domain
        domain = borg.get_domain(domain_name)
        paths = list(cargo.files_under(tasks_root, domain.extensions))

        for path in paths:
            yield (features_for_path, [domain, path])

    def collect_run(task, row):
        # unpack the (feature names, feature values) result; explicit
        # unpacking replaces the py2-only tuple-parameter syntax
        (names, values) = row
        (_, cnf_path) = task.args
        csv_path = cnf_path + ".features.csv"

        with open(csv_path, "w") as csv_file:
            # construct a single writer instead of one per row
            writer = csv.writer(csv_file)

            writer.writerow(names)
            writer.writerow(values)

    # the helpers above were defined but never dispatched (and `workers`
    # went unused); run them as the other collection scripts do
    cargo.do_or_distribute(yield_runs(), workers, collect_run)
Example #8
0
def main(out_path, portfolio_name, solvers_path, *tasks_roots):
    """Train a solver.

    Loads the solvers bundle, trains the named portfolio on the tasks under
    ``tasks_roots``, and pickles the result to ``out_path``.
    """

    cargo.enable_default_logging()

    # load the solvers bundle
    bundle = borg.load_solvers(solvers_path)

    # train the portfolio
    training = borg.storage.TrainingData(tasks_roots, bundle.domain)
    portfolio = borg.portfolios.named[portfolio_name](bundle, training, 50.0, 42) # XXX hard-coded budget/seed

    logger.info("portfolio training complete")

    # write it to disk; protocol -1 selects the highest (binary) pickle
    # protocol, so the file must be opened in binary mode — text mode
    # corrupts binary pickles on platforms with newline translation
    with open(out_path, "wb") as out_file:
        pickle.dump(portfolio, out_file, protocol = -1)

    logger.info("portfolio written to %s", out_path)
Example #9
0
def main(out_path, domain_name, budget, tasks_root, tests_root = None, live = False, runs = 16, workers = 0):
    """Collect validation results.

    For each of ``runs`` random train/test splits, runs every named
    portfolio and streams one CSV row per result to ``out_path``.
    """

    cargo.enable_default_logging()

    cargo.get_logger("borg.portfolios", level = "DETAIL")

    def yield_runs():
        # build solvers and train / test sets
        if live:
            domain = borg.get_domain(domain_name)
        else:
            # simulated runs replay recorded data instead of real solvers
            domain = FakeDomain(borg.get_domain(domain_name))

        paths = list(cargo.files_under(tasks_root, domain.extensions))
        # 50/50 train/test split when no explicit test set is given
        examples = int(round(len(paths) * 0.50))

        logger.info("found %i tasks", len(paths))

        if tests_root is not None:
            tests_root_paths = list(cargo.files_under(tests_root, domain.extensions))

        # build validation runs
        for _ in xrange(runs):
            # unique label identifying this train/test split in the output
            split = uuid.uuid4()
            # shuffle by sorting on random keys
            shuffled = sorted(paths, key = lambda _ : numpy.random.rand())
            train_paths = shuffled[:examples]

            if tests_root is None:
                test_paths = shuffled[examples:]
            else:
                test_paths = tests_root_paths

            for name in borg.portfolios.named:
                yield (run_validation, [name, domain, train_paths, test_paths, budget, split])

    # stream results into the CSV as they complete
    with open(out_path, "w") as out_file:
        writer = csv.writer(out_file)

        writer.writerow(["name", "budget", "cost", "rate", "split"])

        cargo.distribute_or_labor(yield_runs(), workers, lambda _, r: writer.writerows(r))
Example #10
0
def main(path, executable, no_tmux = False, timeout = 250, window = None, *arguments):
    """
    Run something in response to changes in a directory.
    """

    # enable logging
    cargo.enable_default_logging()

    # set up inotify-based watching of the directory tree
    use_tmux = not no_tmux
    command = [executable] + list(arguments)
    watch_manager = pyinotify.WatchManager()
    notifier = pyinotify.Notifier(watch_manager, TriggerHandler())
    event_mask = pyinotify.IN_CREATE | pyinotify.IN_DELETE | pyinotify.IN_MODIFY

    watch_manager.add_watch(path, event_mask, rec = True)

    # respond to filesystem events until interrupted
    try:
        while True:
            if notifier.check_events():
                # drain the initial batch of events
                notifier.read_events()
                notifier.process_events()

                # keep coalescing until the directory goes quiet
                if timeout is not None:
                    while notifier.check_events(timeout = timeout):
                        notifier.read_events()
                        notifier.process_events()

                # fire the configured command
                execute_command(command, use_tmux, window)
    finally:
        # restore the tmux window status color on the way out
        if use_tmux:
            subprocess.call(["tmux", "-q", "setw", "-t", str(window), "window-status-bg", "default"])
Example #11
0
def main(req_address, condor_id):
    """Do arbitrary distributed work."""

    cargo.enable_default_logging()

    # establish the REQ connection to the work server
    logger.info("connecting to %s", req_address)

    zmq_context = zmq.Context()
    req_socket = zmq_context.socket(zmq.REQ)

    req_socket.connect(req_address)

    # process work until the server is done with us, cleaning up zeromq
    # state even if the loop raises
    try:
        work_loop(condor_id, req_socket)
    finally:
        logger.info("flushing sockets and terminating zeromq context")

        req_socket.close()
        zmq_context.term()

        logger.info("zeromq cleanup complete")
Example #12
0
def main(req_address, condor_id):
    """Do arbitrary distributed work."""

    cargo.enable_default_logging()

    # open a request socket to the work server
    logger.info("connecting to %s", req_address)

    context = zmq.Context()
    socket_to_server = context.socket(zmq.REQ)

    socket_to_server.connect(req_address)

    try:
        # pull and execute tasks until told to stop
        work_loop(condor_id, socket_to_server)
    finally:
        # always tear down the socket and context, even on error
        logger.info("flushing sockets and terminating zeromq context")

        socket_to_server.close()
        context.term()

        logger.info("zeromq cleanup complete")