Example #1
def run(
    formula,
    join_tree,
    timeout,
    output,
    entry_type,
    tensor_library,
    max_width,
    thread_limit,
    performance_factor,
):
    sys.setrecursionlimit(100000)
    tensor_library = tensor_library(entry_type, thread_limit=thread_limit)

    stopwatch = util.Stopwatch()
    with util.TimeoutTimer(timeout) as timer:
        try:
            formula = util.Formula.parse_DIMACS(formula)
            stopwatch.record_interval("Parse Formula")

            tree = get_join_tree(formula, join_tree, timer, output, max_width,
                                 performance_factor)
            if tree is not None:
                timer.reset_timeout(timeout)
                stopwatch.record_interval("Parse Join Tree")

                count = execute_join_tree(formula, tree, tensor_library,
                                          output)
                if count is not None:
                    output.output_pair("Count", count)
                    stopwatch.record_interval("Execution")
            stopwatch.record_total("Total")
        except TimeoutError:
            util.log("Parsing timed out", flush=True)
            output.output_pair("Error", "execution timeout")
        except Exception:
            util.log("Unknown error", flush=True)
            util.log(traceback.format_exc())
            output.output_pair("Error", "unknown error")

    for name, record in stopwatch.records.items():
        output.output_pair(name + " Time", record)
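
All six examples drive the same small util.Stopwatch interface: record_interval(name) records the time since the last checkpoint, record_total(name) the time since construction, elapsed_time() (used in Example #5) reads the running total, and callers iterate the records dict at the end. Below is a minimal sketch of that interface, inferred from the usage here rather than from the real util module; an assumed __str__ is included because Example #4 prints the stopwatch directly.

import time

class Stopwatch:
    """Minimal sketch of the Stopwatch interface assumed by these examples."""

    def __init__(self):
        self._start = time.time()  # reference point for totals
        self._last = self._start   # reference point for intervals
        self.records = {}          # name -> elapsed seconds

    def record_interval(self, name):
        # Time since the previous checkpoint (or construction).
        now = time.time()
        self.records[name] = now - self._last
        self._last = now

    def record_total(self, name):
        # Time since construction.
        self.records[name] = time.time() - self._start

    def elapsed_time(self):
        return time.time() - self._start

    def __str__(self):
        # Output format is a guess; only the existence of __str__ is implied.
        return "%.3f s elapsed" % self.elapsed_time()
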
Example #2
                   "receptor_type": 0
               })

nest.Connect(multimeter, [m[0]])

if nest.version() == 'NEST 2.12.0':
    syn_dict = {
        "model": "excitatory",
    }
    conn_dict = {
        "rule": "one_to_one",
    }

    nest.Connect(n, m, conn_dict, syn_dict)
    nest.Connect(m, spikedetector)
    # print 'Connecting ' + ' my_nest.GetConnections ', len(nest.GetConnections(n)), len(n)

if nest.version() == 'NEST 2.2.2':

    nest.Connect(n, m, model="excitatory")

    nest.ConvergentConnect(m, spikedetector)

with util.Stopwatch('Speed test'):
    nest.SetKernelStatus({'print_time': True})
    nest.Simulate(1000)

util.show(multimeter, spikedetector)
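
Example #2 uses the stopwatch differently: as a labelled context manager (with util.Stopwatch('Speed test'):). In the real util module this is presumably a single class; continuing the sketch above as a subclass keeps the illustration self-contained. The label and the on-exit report are assumptions based on the usage.

class LabelledStopwatch(Stopwatch):
    # Context-manager form matching `with util.Stopwatch('Speed test'):`.
    def __init__(self, name=None):
        super().__init__()
        self.name = name

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc, tb):
        # Report the labelled wall-clock time for the block on exit.
        print("%s: %.3f s" % (self.name, self.elapsed_time()))
        return False  # never suppress exceptions from the block
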
Example #3
def run(
    network_pair,
    timeout,
    output,
    entry_type,
    tensor_library,
    max_rank,
    thread_limit,
    record_log,
    mem_limit,
):
    sys.setrecursionlimit(100000)
    tensor_library = tensor_library(entry_type, thread_limit=thread_limit)
    if mem_limit is not None:
        mem_limit /= tensor_library.get_entry_size()

    stopwatch = util.Stopwatch()
    elapsed_time, tree, network = pickle.load(network_pair)
    edges_to_slice = set()
    groups_to_slice = []
    util.log("Using tree of max-rank " + str(tree.maxrank))
    stopwatch.record_interval("Load")

    if tree.maxrank <= max_rank:
        FLOPs, memory, edge_to_slice = tree.estimate_cost(edges_to_slice)
        while mem_limit is not None and memory > mem_limit:
            equivalent_edges = network.find_equivalent_edges(edge_to_slice)
            util.log(
                "Memory usage at " + str(memory) +
                "; slicing network at equivalent edges " +
                str(equivalent_edges),
                flush=True,
            )
            groups_to_slice.append(equivalent_edges)
            edges_to_slice |= equivalent_edges
            FLOPs, memory, edge_to_slice = tree.estimate_cost(edges_to_slice)

        try:
            with util.TimeoutTimer(timeout) as timer:
                while True:
                    try:
                        result = 0
                        for slice_network in network.slice_groups(
                                groups_to_slice):
                            tensor_result, contract_log = tensor_library.contract(
                                slice_network, tree, record_log)
                            result += tensor_result[tuple()]
                        break
                    except tensor_network.OutOfMemoryError:
                        if mem_limit is None:
                            raise RuntimeError(
                                "Ran out of memory when performing contractions"
                            )

                        # The estimation of memory usage is imperfect; another slice is required
                        equivalent_edges = network.find_equivalent_edges(
                            edge_to_slice)
                        util.log(
                            "Memory usage at " + str(memory) +
                            "; slicing network at equivalent edges " +
                            str(equivalent_edges),
                            flush=True,
                        )
                        groups_to_slice.append(equivalent_edges)
                        edges_to_slice |= equivalent_edges
                        FLOPs, memory, edge_to_slice = tree.estimate_cost(
                            edges_to_slice)
                stopwatch.record_interval("Contraction")
                stopwatch.record_total("Total")
                timer.cancel()
                output.output_pair("Count", result)
                if record_log:
                    output.output_pair("Log", repr(str(contract_log)))
        except TimeoutError:
            util.log("Timed out during contraction", flush=True)
            output.output_pair("Error", "contraction timeout")
        except MemoryError:
            util.log("Ran out of memory during contraction", flush=True)
            output.output_pair("Error", "contraction memout")
        except Exception:
            util.log("Error during contraction", flush=True)
            util.log(traceback.format_exc())
            output.output_pair("Error", "contraction unknown error")

    for name, record in stopwatch.records.items():
        output.output_pair(name + " Time", record)

    # Treewidth-based methods include the width of the underlying tree decomposition
    if hasattr(tree, "treewidth"):
        output.output_pair("Treewidth", tree.treewidth)

    output.output_pair("Max Rank", tree.maxrank)
    FLOPs, memory, _ = tree.estimate_cost(set().union(*groups_to_slice))
    output.output_pair("# Network Slices", 2**len(groups_to_slice))
    output.output_pair("Estimated Memory", float(memory))
    output.output_pair("Estimated FLOPs",
                       float(FLOPs * (2**len(groups_to_slice))))
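
Every runner reports results through output.output_pair(key, value). A minimal stand-in for experimenting with these snippets (an assumption; the real object presumably writes to a structured results log rather than stdout):

class Output:
    # Hypothetical stand-in for the `output` argument used throughout;
    # output_pair is the only method these examples exercise.
    def output_pair(self, key, value):
        print("%s: %s" % (key, value))
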
Example #4
def Benchmark(blueprintTypeID, activityID, materialLevel=0):
    # Time 1000 repeated quote computations and print the stopwatch.
    sw = util.Stopwatch()
    for _ in range(1000):
        GetQuote(blueprintTypeID, activityID, materialLevel)

    print(sw)
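
With the Stopwatch sketch above, Example #4's pattern reduces to constructing a stopwatch, running the workload, and printing it, relying on the assumed __str__:

# Hypothetical smoke test of the sketch; GetQuote is stood in by a no-op.
sw = Stopwatch()
for _ in range(1000):
    pass  # stand-in for GetQuote(blueprintTypeID, activityID, materialLevel)
print(sw)  # e.g. "0.000 s elapsed" with the assumed format
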
Example #5
def run(
    benchmark,
    timeout,
    output,
    seed,
    network_construction,
    method,
    store,
    method_affinity,
):
    sys.setrecursionlimit(100000)
    stopwatch = util.Stopwatch()
    if seed is not None:
        random.seed(seed)
    else:
        seed = random.randrange(1000000)  # for decomposition solvers

    if store is not None and not os.path.exists(store):
        os.makedirs(store)

    network = network_construction(benchmark)
    util.log("Constructed network", flush=True)
    stopwatch.record_interval("Construction")

    log_trees = []  # Discovered trees, to be saved at the end
    log = []  # Log and store only decompositions of lower carving width
    full_log = []  # Log all decompositions
    best_cw = None
    with util.TimeoutTimer(timeout) as timer:
        tree_gen = method.generate_contraction_trees(
            network, timer, seed=seed, affinity=method_affinity
        )
        try:
            for tree, new_network in tree_gen:
                width = {"Carving": tree.maxrank}

                # Treewidth-based methods include the width of the underlying tree decomposition
                if hasattr(tree, "treewidth"):
                    width["Tree"] = tree.treewidth
                if hasattr(tree, "branchwidth"):
                    width["Branch"] = tree.branchwidth

                util.log("Found decomposition with " + str(width), flush=True)
                elapsed_time = stopwatch.elapsed_time()

                FLOPs, _, _ = tree.estimate_cost(set())
                full_log.append((elapsed_time, width, FLOPs))
                if best_cw is None or best_cw > width["Carving"]:
                    best_cw = width["Carving"]
                    log.append((elapsed_time, width, FLOPs))
                    if store is not None:
                        log_trees.append(
                            (
                                (elapsed_time, tree, new_network),
                                store + "/" + str(len(log)) + ".con",
                            )
                        )
                        util.log(
                            "Saved contraction tree " + str(time.time()), flush=True
                        )
        except TimeoutError:
            if best_cw is None:
                util.log("No decomposition found within the timeout", flush=True)
                output.output_pair("Error", "decomposition timeout")
        except MemoryError:
            util.log("Ran out of memory during search for decomposition", flush=True)
            output.output_pair("Error", "decomposition memout")
        except Exception:
            util.log("Error during search for decomposition", flush=True)
            util.log(traceback.format_exc())
            output.output_pair("Error", "decomposition unknown error")
        tree_gen.close()
    output.output_pair("Log", repr(str(log)))
    output.output_pair("FullLog", repr(str(full_log)))
    for info, filename in log_trees:
        with open(filename, "wb") as tree_file:
            pickle.dump(info, tree_file)
Example #6
def run(
    benchmark,
    timeout,
    tree_timeout,
    output,
    seed,
    method,
    network_construction,
    entry_type,
    tensor_library,
    max_rank,
    thread_limit,
    performance_factor,
    log_contraction_tree,
    method_affinity,
    mem_limit,
):
    sys.setrecursionlimit(100000)
    tensor_library = tensor_library(entry_type, thread_limit=thread_limit)
    if mem_limit is not None:
        mem_limit /= tensor_library.get_entry_size()

    stopwatch = util.Stopwatch()
    if seed is not None:
        random.seed(seed)
    else:
        seed = random.randrange(1000000)  # for decomposition solvers

    if tree_timeout <= 0:
        tree_timeout = timeout

    network = network_construction(benchmark)
    util.log("Constructed network", flush=True)

    stopwatch.record_interval("Construction")
    with util.TimeoutTimer(tree_timeout) as timer:
        tree, network, groups_to_slice, edges_to_slice = find_contraction_tree(
            method,
            network,
            seed,
            timer,
            output,
            max_rank,
            performance_factor,
            method_affinity,
            mem_limit,
        )
        stopwatch.record_interval("Tree")

        if tree is not None:
            util.log("Using tree of max-rank " + str(tree.maxrank))
            if log_contraction_tree:
                util.log("Contraction Tree: " + str(tree))

            if tree.maxrank <= max_rank:
                try:
                    timer.reset_timeout(timeout)

                    while True:
                        try:
                            result = 0
                            for slice_network in network.slice_groups(
                                    groups_to_slice):
                                tensor_result, contract_log = tensor_library.contract(
                                    slice_network, tree, False)
                                result += tensor_result[tuple()]
                            break
                        except tensor_network.OutOfMemoryError:
                            if mem_limit is None:
                                raise RuntimeError(
                                    "Ran out of memory when performing contractions"
                                )

                            # The estimation of memory usage is imperfect; another slice is required
                            _, memory, edge_to_slice = tree.estimate_cost(
                                edges_to_slice)

                            equivalent_edges = network.find_equivalent_edges(
                                edge_to_slice)
                            util.log(
                                "Memory usage at " + str(memory) +
                                "; slicing network at equivalent edges " +
                                str(equivalent_edges),
                                flush=True,
                            )
                            groups_to_slice.append(equivalent_edges)
                            edges_to_slice |= equivalent_edges
                    stopwatch.record_interval("Contraction")
                    stopwatch.record_total("Total")
                    timer.cancel()
                    output.output_pair("Count", result)
                except TimeoutError:
                    util.log("Timed out during contraction", flush=True)
                    output.output_pair("Error", "contraction timeout")
                except MemoryError:
                    util.log("Ran out of memory during contraction",
                             flush=True)
                    output.output_pair("Error", "contraction memout")
                except Exception:
                    util.log("Error during contraction", flush=True)
                    util.log(traceback.format_exc())
                    output.output_pair("Error", "contraction unknown error")

            # Treewidth-based methods include the width of the underlying tree decomposition
            if hasattr(tree, "treewidth"):
                output.output_pair("Treewidth", tree.treewidth)
            if hasattr(tree, "branchwidth"):
                output.output_pair("Branchwidth", tree.branchwidth)

            output.output_pair("Max Rank", tree.maxrank)
            FLOPs, memory, _ = tree.estimate_cost(
                set().union(*groups_to_slice))
            output.output_pair("# Network Slices", 2**len(groups_to_slice))
            output.output_pair("Estimated Memory", float(memory))
            output.output_pair("Estimated FLOPs",
                               float(FLOPs * (2**len(groups_to_slice))))

    for name, record in stopwatch.records.items():
        output.output_pair(name + " Time", record)

    if len(tensor_network.tensor_apis.contraction_info) > 0:
        output.output_pair(
            "Contraction Log",
            repr(str(tensor_network.tensor_apis.contraction_info)))
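
Finally, Examples #1, #3, #5, and #6 all pair the stopwatch with util.TimeoutTimer: a context manager that raises TimeoutError after a deadline and supports reset_timeout() and cancel(). One way to get that behaviour is signal.setitimer; this sketch is an assumption about the interface, not the project's actual implementation, and works only on Unix in the main thread.

import signal

class TimeoutTimer:
    def __init__(self, timeout):
        self._timeout = timeout

    def _on_alarm(self, signum, frame):
        raise TimeoutError()

    def __enter__(self):
        self._previous = signal.signal(signal.SIGALRM, self._on_alarm)
        self.reset_timeout(self._timeout)
        return self

    def __exit__(self, exc_type, exc, tb):
        self.cancel()
        signal.signal(signal.SIGALRM, self._previous)
        return False  # let TimeoutError propagate to the caller's handler

    def reset_timeout(self, timeout):
        # Restart the countdown from `timeout` seconds.
        signal.setitimer(signal.ITIMER_REAL, timeout)

    def cancel(self):
        # Disarm the timer so no alarm fires after the timed region.
        signal.setitimer(signal.ITIMER_REAL, 0)
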