Example #1
def cut_lambda_minus_one(
    models, records,
    key_topology="graph",
    key_eweights="eweights",
    key_partition="partition",
    weights_crit=0,
):
    """
    """
    topo  = models[key_topology]
    nbr_e = models[key_topology]["nbr_e"]
    edges = models[key_topology]["edges"]
    parts = models[key_partition]["parts"]
    wgts  = models[key_eweights]["weights"]
    crit  = format_crit(weights_crit)
    if len(crit) > 1:
        crack_error(
            ValueError, "cut",
            "Does not handle multiple criteria yet..."
        )
    c = crit[0]
    if topo["entity"] == "graph":
        return sum(
            wgts[e][c] for e, (i, j) in enumerate(edges)
                       if parts[i] != parts[j]
        )
    else:
        # Hypergraph: each hyperedge contributes its weight times the
        # number of distinct parts it spans, minus one.
        return sum(
            wgts[e][c] * (len(set(parts[j] for j in ends)) - 1)
                for e, ends in enumerate(edges)
        )
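
A minimal usage sketch (the model contents below are made up for illustration; only the keys read by the function are filled in):

    # Hypothetical 4-node graph split into two parts.
    models = {
        "graph": {"entity": "graph", "nbr_e": 3,
                  "edges": [(0, 1), (1, 2), (2, 3)]},
        "eweights": {"weights": [[1], [5], [2]]},
        "partition": {"parts": [0, 0, 1, 1]},
    }
    # Only edge (1, 2) crosses the two parts, so the cut is 5.
    print(cut_lambda_minus_one(models, records={}))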
Example #2
File: random_part.py Project: RemiBe/crack
def random_part(models, records, **algopt):
    """Give a random part to every node.

    Arguments:
        nbr_p: Number of parts required.

    Options:
        key_entity_in: str: Key of the entity in [models] that will be 
            partitioned. (Default is 'graph').
        key_entity_out: str: Key to store the Partition in [models].
            (Default is 'partition').
    """
    ### Arguments
    try:
        nbr_p = algopt["nbr_p"]
    except KeyError:
        crack_error(ValueError, "random_part",
            "Missing argument(s): nbr_p")
    ### Options ###
    key_in  = algopt.get("key_entity_in", "graph")
    part    = algopt.get("part", 0)
    key_out = algopt.get("key_entity_out", "partition")
    ### Variables ###
    nbr_n = models[key_in]["nbr_n"]
    ### Partition ###
    start = time()
    parts = [rd.randint(0, nbr_p - 1) for _ in range(nbr_n)]
    end   = time()
    init_Partition_from_args(models, {}, key_out, nbr_p, parts)
    ### Record statistics ###
    record_algos_stats(models, records, "random_part",
                       cut=0, imb=None, t=end - start, operations=nbr_n)
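
A usage sketch (the graph model below is made up; [records] is only passed through to the statistics helper, so an empty dict is assumed here):

    # Hypothetical call: give each of 6 nodes one of 3 random parts.
    models = {"graph": {"entity": "graph", "nbr_n": 6}}
    records = {}
    random_part(models, records, nbr_p=3)
    print(models["partition"]["parts"])  # e.g. [2, 0, 1, 1, 0, 2]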
Example #3
def get_next_phase(l_models, records, tasks, phases_index, next_phase):
    """Analyse the fork conditions and return the next Phase.
    """
    i = 0
    phase_id = None
    if isinstance(next_phase, dict):
        phase = next_phase["phase"]
        while phase_id is None and "alt_{}".format(i) in next_phase:
            endi = next_phase["alt_{}".format(i)]
            conds = {}
            for j, cond in enumerate(endi["conds"]):
                conds[j] = FORK_FCTS[cond["algo"]](l_models, records, phase,
                                                   **cond["args"])
            if len(conds) == 1:
                if conds[0]:
                    phase_id = endi["phase"]
                    break
            else:
                if "expr" not in endi:
                    crack_error(
                        ValueError, "get_next_phase",
                        "Specify the 'expr' to chose the phase after {}.".
                        format(phase))
                if eval_expr(endi["expr"], conds, only_keywords=True):
                    phase_id = endi["phase"]
                    break
            i += 1
        if phase_id is None:
            phase_id = next_phase["next"]
    else:
        phase_id = next_phase
    # Get the corresponding phase in the task list
    return phases_index[phase_id]
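
A sketch of the fork structure that [next_phase] can take (the field names are read off the code above; the condition algorithm and phase names are hypothetical):

    next_phase = {
        "phase": "refine",        # current phase, used in error messages
        "next": "finalize",       # fallback phase if no alternative matches
        "alt_0": {
            "conds": [{"algo": "is_balanced", "args": {"tol": 0.05}}],
            "phase": "stop",      # phase chosen when the condition holds
        },
    }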
Example #4
File: weights.py Project: RemiBe/crack
def format_crit(crit_spec):
    """Format the criteria as a list of int. Criteria can be entered
    as:
    - an int
    >>> crit: 1      # outputs: [1]

    - a list of int
    >>> crit: [0, 2] # outputs: [0, 2]

    - a string separating numbers with ',' and/or '-'
    >>> crit: 0, 2-4 # outputs: [0, 2, 3, 4]
    """
    if isinstance(crit_spec, int):
        crit = [crit_spec]
    elif isinstance(crit_spec, list):
        crit = crit_spec
    elif isinstance(crit_spec, str):
        crit = []
        for subc in crit_spec.split(","):
            if "-" in subc:
                begin, end = tuple(subc.split("-"))
                crit.extend(range(int(begin), int(end) + 1))  # inclusive range
            else:
                crit.append(int(subc))
    else:
        crack_error(
            ValueError, "init_Weights_...",
            "Unrecognized 'crit' (got {}). Should be an int, or list of int, or str of int and ',' and '-'."
            .format(crit_spec))
    return crit
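
A quick sanity check of the three accepted forms (relying on the '-' ranges being inclusive, as the docstring states):

    assert format_crit(1) == [1]
    assert format_crit([0, 2]) == [0, 2]
    assert format_crit("0, 2-4") == [0, 2, 3, 4]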
Example #5
def _compute_colors_discrete(values):
    """Map each integer value to a distinct color, using the values as
    indices into the global COLOR_DISTINCT palette.
    """
    global COLOR_DISTINCT
    nbr_diff_values = len(set(values))
    if nbr_diff_values > len(COLOR_DISTINCT):
        crack_error(
            ValueError, "_compute_colors_discrete",
            "Current color scheme limited to {} different values, while {} different values were provided... Add new colors in the COLOR_DISTINCT of crack.colors."
            .format(len(COLOR_DISTINCT), nbr_diff_values))
    return [COLOR_DISTINCT[value] for value in values]
Example #6
def _model_to_values(model):
    """Return one list of values per element of the model: node IDs for a
    topology, the weight vectors for weights, or the part numbers for a
    partition.
    """
    if model["entity"] in ("graph", "hypergraph", "mesh"):
        return [[i] for i in range(model["nbr_n"])]  # IDs
    elif model["entity"] in ["nweights", "eweights", "hweights"]:
        return list(list(l) for l in model["weights"])
    elif model["entity"] in "partition":
        return [[p] for p in model["parts"]]
    else:
        crack_error(ValueError, "_model_to_values",
                    "Unknown entity: {}.".format(model["entity"]))
Example #7
def _adapt_coords(models, geom_key, algopt):
    """Modify the coordinates so that the plot will fit in the size
    required. Also compute the node_radius if not initialized.

    The new coordinates are stored for later use if the same image
    size is required.
    """
    geom = models[geom_key]
    coord = geom["coord"]
    size = algopt["image_size"]
    margin = algopt["margin"]
    # Check if we really need to adapt the coordinates
    geom.setdefault("_plot_opts", {})
    if geom["_plot_opts"].get("coord_adapted_for_size") == size:
        algopt.setdefault("node_radius", geom["_plot_opts"]["node_radius"])
        return
    # Compute node_radius
    topo_entities = ("graph", "hypergraph")
    if "node_radius" not in algopt:
        if geom_key in topo_entities:
            topo_key = geom_key
        else:
            try:
                topo_key = next(key for key in models
                                if isinstance(models[key], dict)
                                and models[key].get("entity") in topo_entities)
            except StopIteration:
                crack_error(ValueError, "plot",
                            "Could not find topological data")
        algopt["node_radius"] = 2 * min_dist_nodes(models[topo_key]) / 5
    # Translation to get positive coordinates beginning at (0,0)
    lmin = list(coord[0])
    lmax = list(coord[0])
    for pt in coord[1:]:
        for c, l in enumerate(pt):
            lmin[c] = min(lmin[c], l)
            lmax[c] = max(lmax[c], l)
    delta = [-pmin for pmin in lmin]
    translate(geom, delta=delta)
    # Homothetic transformation to fit in the required size
    coefs = []
    for pmin, pmax, s in zip(lmin, lmax, size):
        if pmin == pmax:
            coefs.append(1)
        else:
            coefs.append((s - 2 * margin * s) / (pmax - pmin))
    homothetic(geom, coefs)
    # Adapt the node_radius to the homothetic transformation
    _adapt_options_homothetic(algopt, coefs)
    # Translation for horizontal/vertical margin
    delta = [margin * s for s in size]
    translate(geom, delta=delta)
    # Record the transformation
    geom["_plot_opts"]["node_radius"] = algopt["node_radius"]
    geom["_plot_opts"]["coord_adapted_for_size"] = size
Example #8
def get_obj_fcts(models, obj_name, obj_args):
    """How to compute the values (e.g. cut, imbalance) that the user
    usually wants to minimize.
    """
    obj_fct = None
    if obj_name == "cut":
        k_topo = obj_args["key_topology"]
        k_part = obj_args["key_partition"]
        topology = models[k_topo]
        partition = models[k_part]
        if topology["entity"] == "graph":
            k_ewgts = obj_args["key_eweights"]
            eweights = models[k_ewgts]

            def obj_fct(models, records, stats):
                return cut_lambda_minus_one(models,
                                            records,
                                            key_topology=k_topo,
                                            key_eweights=k_ewgts,
                                            key_partition=k_part)

            def gain_fct(i, p_src, p_tgt, stats):
                return gain__cut_lambda_minus_one__graph(
                    topology, eweights, partition, i, p_src, p_tgt, stats)
        elif topo["entity"] == "hypergraph":
            k_hwgts = obj_args["key_hweights"]
            hwgts = models[k_hwgts]

            def obj_fct(models, records, stats):
                return cut_lambda_minus_one(models,
                                            records,
                                            key_topology=k_topo,
                                            key_eweights=k_hwgts,
                                            key_partition=k_part)

            def gain_fct(i, p_src, p_tgt, stats):
                return gain__cut_lambda_minus_one__hypergraph(
                    topology, hwgts, partition, i, p_src, p_tgt, stats)
    elif obj_name == "imbalance":
        nwgts = models[obj_args["key_nweights"]]
        parts = models[obj_args["key_partition"]]

        def gain_fct(i, p_src, p_tgt):
            pass
            # TODO return gain__imbalance(nwgts, parts, i, p_tgt, stats)

    if obj_fct is None:
        crack_error(ValueError, "get_gain_fct", "Unknown gain function.")
    return obj_fct, gain_fct
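
A hypothetical usage (assuming [models] already holds a graph, its edge weights, and a partition under the default keys):

    obj_args = {"key_topology": "graph", "key_eweights": "eweights",
                "key_partition": "partition"}
    obj_fct, gain_fct = get_obj_fcts(models, "cut", obj_args)
    current_cut = obj_fct(models, records, stats=None)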
Example #9
def crack_part(
    l_models,
    l_aggr,
    records,
    tasks,
    phases_index,
    stop_cond=is_end_phase,
    i=0,
    msg="",
):
    """Recursive function
    """
    global NORMAL_FCTS
    nbr_tasks = len(tasks)
    random_seed = 1

    while 0 <= i < nbr_tasks and not stop_cond(l_models, tasks[i]):
        phase = tasks[i]
        # Get the algo #
        algo = phase["algo"]
        args = phase["args"]
        next_phase = phase["next"]
        # Apply the right algo #
        f_start = time()
        models = l_models[-1]
        if algo == "repeat":
            nbr_tests = args["nbr_tests"]
            stop_cond = read_repeat_stop_cond(l_models, records, args["conds"],
                                              args.get("expr"))
            tests = []
            models_ori = copy_models(models)
            # Perform several tries
            for repeat_i in range(nbr_tests):
                msg_i = msg + "{} ".format(repeat_i)
                tests.append(
                    crack_part(
                        l_models,
                        l_aggr,
                        records,
                        tasks,
                        phases_index,
                        stop_cond,
                        i,
                        msg_i,
                    ))
            # Select the best one
            select = args["select"]
        elif algo == "set_random_seed":
            value = args.get("value", 1)
            if value == "random":
                value = rd.randint(0, 1000000)
            elif value == "increasing":
                value = random_seed
                random_seed += 1
            print("  |   Random seed: {}".format(value))
            rd.seed(a=value)
        elif algo in NORMAL_FCTS:
            NORMAL_FCTS[algo](models, records, **args)
        elif algo in COARSEN_FCTS:
            models = COARSEN_FCTS[algo](l_models, l_aggr, records, **args)
        elif algo in PROLONG_FCTS:
            models = PROLONG_FCTS[algo](l_models, l_aggr, records, **args)
        elif algo == "pass":
            pass
        else:
            crack_error(ValueError, "crack_part",
                        "Unknown algorithm (got {}).".format(algo))
        print("'-|-, {}{:16} (took {:.3f}s)".format(msg, algo,
                                                    time() - f_start))
        # Get the next phase
        if next_phase is None:
            i += 1
        else:
            i = get_next_phase(l_models, records, tasks, phases_index,
                               next_phase)
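
A sketch of the data this driver expects (the algorithm names and the empty aggregation list are assumptions; the algorithms must be registered in NORMAL_FCTS):

    tasks = [
        {"algo": "random_part",   "args": {"nbr_p": 4}, "next": None},
        {"algo": "vn_first_part", "args": {"nbr_p": 4}, "next": None},
    ]
    phases_index = {}  # maps phase identifiers to indices in [tasks]
    crack_part([models], [], records, tasks, phases_index)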
Example #10
def vn_first_part(
        models,
        records,
        key_nweights="nweights",
        key_partition_in="partition",
        key_partition_out="partition",
        iter_nodes="first_cycle",
        iter_parts="first_cycle",
        stop_after=None,
        stop_balanced=False,  # TODO
        targets=None,
        msg=False,
        **algopt):
    """Refine an initial partition by successively moving nodes when
    it decreases the imbalance.

    Arguments:
        nbr_p: Number of parts required.

    Options:
        key_partition_in: str: Key of the Partition in [models] that
            will be refined. (Default is 'partition'.)
        key_nweights: str: Key of the Weights in [models] that will be
            partitioned. (Default is 'nweights'.)
        key_partition_out: str: Key to store the Partition in [models].
            (Default is 'partition').
        iter_nodes: 'first_cycle'|'random'
        iter_parts: 'first_cycle'
        stop_after: int or (float in [0, 1])
    """
    ### Arguments
    try:
        nbr_p = algopt["nbr_p"]
    except KeyError:
        crack_error(ValueError, "random_part", "Missing argument(s): nbr_p")
    ### Options ###
    if nbr_p == 2:
        iter_parts = "bipart"
    if key_partition_in != key_partition_out:
        copy_Partition(models, key_partition_in, key_partition_out)
    parts = models[key_partition_out]["parts"]
    key_norm_wgts = key_nweights + "__norm"
    if key_norm_wgts not in models:
        init_Weights_normalized(
            models,
            records,
            key_in=key_nweights,
        )
    nwgts = models[key_norm_wgts]["weights"]
    if stop_after is None:
        stop_after = models[key_nweights]["nbr_n"]
    elif 0 < stop_after < 1:
        stop_after = models[key_nweights]["nbr_n"] * stop_after

    start = time()

    ### Initialize ###
    moves_done = 0
    moves_tslm = 0  # Number of move Tested Since Last Move
    moves_tested = 0
    iter_nodes_fct = ITER_NODES_FCTS[iter_nodes]
    iter_nodes_opts = {
        "key_topology": key_norm_wgts,
    }
    iter_parts_fct = ITER_PARTS_FCTS[iter_parts]
    iter_parts_opts = {
        "key_topology": key_norm_wgts,
        "last_p_tgt": nbr_p - 1,
    }
    imbs = imbalances(models,
                      records,
                      key_nweights=key_nweights,
                      key_partition=key_partition_out,
                      targets=targets)
    imb = max(max(imb_cp) for imb_cp in imbs)
    if msg:
        print("'-|-, [vn_first] imb = {:6.5f}".format(imb))

    ### Algorithm ###

    for i in iter_nodes_fct(models, iter_nodes_opts):
        moves_tested += 1
        moves_tslm += 1
        p_src = parts[i]
        ws = nwgts[i]
        for p_tgt in iter_parts_fct(models, nbr_p, p_src, iter_parts_opts):
            new_imbs = imbalances_after_move(ws, p_src, p_tgt, nbr_p, imbs)
            new_imb = max(max(imb_cp) for imb_cp in new_imbs)
            if new_imb < imb:
                # Move the node
                moves_done += 1
                moves_tslm = 0
                parts[i] = p_tgt
                imbs = new_imbs
                imb = new_imb
                update_iter_opts(iter_nodes_opts, restart=True)
                update_iter_opts(iter_parts_opts,
                                 restart=True,
                                 last_p_tgt=p_tgt)
                if msg:
                    print(
                        "  |   [vn_first] {:3d}/{:5d}: moved {:3d} (p{} -> p{}) imb = {:6.5f}"
                        .format(moves_done, moves_tested, i, p_src, p_tgt,
                                new_imb))
                break
        if moves_tslm > stop_after:
            break

    if msg:
        print("  |   [vn_first] {:3d}/{:5d}: imb = {:6.5f}".format(
            moves_done, moves_tested, imb))
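
A hypothetical call (assuming [models] already contains the 'nweights' and 'partition' entries used above):

    # Refine an existing 2-part partition until no single move lowers
    # the imbalance any further.
    vn_first_part(models, records, nbr_p=2, msg=True)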
Example #11
File: weights.py Project: RemiBe/crack
def init_Weights_from_file(models, records, filename=None, extract_keys=None):
    """Initialize the weights from data stored in a file.

    Arguments:
        models: dict: The Weights will be stored here.
        filename: str: Path to the file which contains the data. An
            example of such file is provided below.

    Optional Arguments:
        extract_keys: str or list of str or dict: The keys of the
            weights that will be extracted. If dict, maps the extracted
            keys with the keys that will be used in the current session.
            If None, all weights will be extracted.

    Example: 2 node weights of 3 criteria, 2 edge weights of 1 criterion
        >>> # weights nweights 2 3
        >>> 10 80 13
        >>> 100 93 12
        >>> # weights my_edge_wgts 2 1
        >>> 12
        >>> 39

        If [extract_keys] is None, [models] will be updated with:
        >>> "nweights": {
        >>>     "entity" : "weights",
        >>>     "nbr_n"  : 2,
        >>>     "nbr_c"  : 3,
        >>>     "weights": ((10, 80, 13), (100, 93, 12)),
        >>>     "totals" : (110, 173, 25)
        >>> }
        >>> "my_edge_wgts": {
        >>>     "entity" : "weights",
        >>>     "nbr_n"  : 2,
        >>>     "nbr_c"  : 1,
        >>>     "weights": ((12,), (39,))
        >>>     "totals" : (51,)
        >>> }
    """
    if filename is None:
        crack_error(
            ValueError, "init_Weights_from_file",
            "Need to provide a 'filename' from which the Weights will be read."
        )
    # Which keys will we retrieve data from?
    if extract_keys is not None:
        if isinstance(extract_keys, str):
            extract_keys = {extract_keys: extract_keys}
        elif isinstance(extract_keys, list):
            extract_keys = {k: k for k in extract_keys}
        elif not isinstance(extract_keys, dict):
            crack_error(
                ValueError, "init_Weights_from_file",
                "Wrong type of 'extract_keys' (should be str, list or dict).")
    # Read file to retrieve data
    with open(filename, "r") as f:
        for line in f:
            if not line or line[0] != "#":
                continue
            words = line.split()
            if words[1] != "weights":
                continue
            key_out = None
            if extract_keys is None:
                key_out = words[2]
            elif words[2] in extract_keys:
                key_out = extract_keys[words[2]]
            if key_out is not None:
                nbr_n = int(words[3])
                nbr_c = int(words[4])
                wgts = [None] * nbr_n
                tots = [0] * nbr_c
                for i in range(nbr_n):
                    line = f.readline()
                    wgts[i] = [int(w) for w in line.split()]
                    for c, w in enumerate(wgts[i]):
                        tots[c] += w
                models[key_out] = {
                    "entity": "weights",
                    "nbr_n": nbr_n,
                    "nbr_c": nbr_c,
                    "weights": wgts,
                    "totals": tots,
                }
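
A hypothetical call (the file name and the renamed key are made up for illustration):

    init_Weights_from_file(models, records,
                           filename="mesh.weights",
                           extract_keys={"nweights": "nw"})
    print(models["nw"]["totals"])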