Code Example #1
File: shell.py Project: chrinide/InferenceEngine
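    # Assumed imports in the enclosing module (not shown in this snippet):
    #   import json
    #   from networkx import Graph, spring_layout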
    def _write_json(ebunch, path):
        """

        Parameters
        ----------
        ebunch
        path

        Returns
        -------

        """
        G = Graph()
        G.add_weighted_edges_from(ebunch)
        pos = spring_layout(G)

        nodes = [{
            "id": str(k),
            "label": str(k),
            "size": "100",
            "x": pos[k][0],
            "y": pos[k][1]
        } for k in pos]

        edges = [{
            "id": '%s-%s' % (e[0], e[1]),
            "source": e[0],
            "target": e[1]
        } for e in ebunch]

        with open(path, 'w') as f:
            json.dump({"nodes": nodes, "edges": edges}, f)
Code Example #2
File: graph_functions.py Project: manish-kr/MapGeist
def generate_complete_graph(nodes, distance_matrix):
    """
    Returns a NetworkX 'Graph' instance given the following-
    1. The list of nodes to include in the graph.
    2. Distance matrix having distances between nodes- as a dict
    of dicts
    The complete map does NOT contain self loops at any node.
    """

    # First generate an empty graph (Graph is assumed imported from networkx)
    graph = Graph()

    # Make a list of edges, with appropriate weights
    graph_edges = []

    for i in range(len(nodes) - 1):
        for j in range(i + 1, len(nodes)):
            word1 = nodes[i]
            word2 = nodes[j]
            weight = distance_matrix[word1][word2]
            graph_edges.append((word1, word2, weight))

    # Construct the graph from the edge list
    graph.add_weighted_edges_from(graph_edges)

    return graph
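A quick usage sketch with a toy symmetric distance matrix (all names illustrative):

nodes = ["cat", "dog", "fox"]
distance_matrix = {
    "cat": {"dog": 1.0, "fox": 2.0},
    "dog": {"cat": 1.0, "fox": 1.5},
    "fox": {"cat": 2.0, "dog": 1.5},
}
g = generate_complete_graph(nodes, distance_matrix)
print(g.number_of_edges())  # 3: one edge per unordered pair, no self loops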
Code Example #3
File: graph.py Project: clausia/z-quantum-core
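# Assumed imports (not shown in the original snippet):
#   import random
#   from random import uniform
#   from typing import Optional
#   import networkx as nx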
def weight_graph_edges(graph: nx.Graph,
                       random_weights: bool = False,
                       seed: Optional[int] = None) -> nx.Graph:
    """Update the weights of all the edges of a graph.

    Args:
        graph: nx.Graph
            The input graph.
        random_weights: bool
            Flag indicating whether the weights should be random or constant (1.).

    Returns:
        A networkx.Graph object
    """
    assert not graph.is_multigraph(), "Cannot deal with multigraphs"

    random.seed(seed)
    if random_weights:
        weighted_edges = [(e[0], e[1], uniform(0, 1)) for e in graph.edges]
    else:
        weighted_edges = [(e[0], e[1], 1.0) for e in graph.edges]

    # If edges already present, it will effectively update them (except for multigraph)
    graph.add_weighted_edges_from(weighted_edges)
    return graph
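A short usage sketch on a built-in test graph (assuming the imports noted above):

g = weight_graph_edges(nx.cycle_graph(4), random_weights=True, seed=42)
print(nx.get_edge_attributes(g, "weight"))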
Code Example #4
def build_graph(edge_weight):
    """
    建图,无向
    返回一个list,list中每个元素为一个图
    """
    from networkx import Graph
    graph = Graph()
    graph.add_weighted_edges_from(edge_weight)
    return graph
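Usage is direct:

g = build_graph([("a", "b", 1.0), ("b", "c", 2.5)])
print(g["a"]["b"]["weight"])  # 1.0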
Code Example #5
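# Assumed names (not shown in the original snippet): numpy as np, Graph from
# networkx, and the project-local helper mesh2edge_list.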
def mesh2graph(faces,
               n=1,
               ordinal=False,
               mask=None,
               vtx_signal=None,
               weight_type=('dissimilar', 'euclidean'),
               weight_normalization=True):
    """
    create graph according to mesh's geometry and vtx_signal

    Parameters
    ----------
    faces : a array with shape (n_triangles, 3)
    n : integer
        specify which ring should be got
    ordinal : bool
        True: get the n_th ring neighbor
        False: get the n ring neighbor
    mask : 1-D numpy array
        specify a area where the ROI is
        non-ROI element's value is zero
    vtx_signal : numpy array
        NxM array, N is the number of vertices,
        M is the number of measurements and time points.
    weight_type : (str1, str2)
        The rule used for calculating weights
        such as ('dissimilar', 'euclidean') and ('similar', 'pearson correlation')
    weight_normalization : bool
        If it is False, do nothing.
        If it is True, normalize weights to [0, 1].
            After doing this, greater the weight is, two vertices of the edge are more related.

    Returns
    -------
    graph : nx.Graph
    """
    row_ind, col_ind, edge_data = mesh2edge_list(faces, n, ordinal, mask,
                                                 vtx_signal, weight_type,
                                                 weight_normalization)
    graph = Graph()
    # add_weighted_edges_from only adds edges, so a graph built with it alone
    # would contain just the vertices that have at least one edge. Some graphs
    # contain isolated nodes, so we add all nodes explicitly first.
    if mask is None:
        n_vtx = np.max(faces) + 1
        graph.add_nodes_from(range(n_vtx))
    else:
        vertices = np.nonzero(mask)[0]
        graph.add_nodes_from(vertices)

    # add_weighted_edges_from is faster than from_scipy_sparse_matrix and
    # from_numpy_matrix, and also faster than the default constructor.
    # For more details, see
    # http://stackoverflow.com/questions/24681677/transform-csr-matrix-into-networkx-graph
    graph.add_weighted_edges_from(zip(row_ind, col_ind, edge_data))

    return graph
Code Example #6
File: graph.py Project: fernand/theoutgroup
def get_graph(edges):
    # Graph is networkx.Graph; MIN_DIST and MIN_DEGREE are module-level
    # constants in the original project (not shown here).
    G = Graph()
    G.add_weighted_edges_from(edges)
    nodes = list(G.nodes())  # materialize: the loop below mutates G
    print(f'Num initial nodes after MIN_DIST: {len(nodes)}')
    for n in nodes:
        if G.degree(n) < MIN_DEGREE:
            G.remove_node(n)
    print(f'Num nodes after degree filter: {len(G.nodes())}')
    return G
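A usage sketch, assuming Graph is imported from networkx and MIN_DEGREE is pinned to an illustrative value:

MIN_DEGREE = 2
G = get_graph([("a", "b", 1.0), ("b", "c", 2.0), ("a", "c", 1.5), ("c", "d", 4.0)])
print(sorted(G.nodes()))  # ['a', 'b', 'c'] -- 'd' has degree 1 and is dropped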
Code Example #7
    @classmethod
    def from_mininet(cls, net):
        w_links = []
        for link in net.links:
            intf1, intf2 = link.intf1, link.intf2
            n1, n2 = intf1.node.name, intf2.node.name
            bw = 0
            if "bw" in intf1.params:
                bw = intf1.params["bw"]
            w_links.append((n1, n2, bw))
        g = Graph()
        g.add_weighted_edges_from(w_links)
        return cls(g, net)
Code Example #8
File: NetworkParser.py Project: ayelkina/Alhe
    def load_graph_from_txt(self, filename):
        # Expected input: one whitespace-separated edge per line, e.g. "a b 1.5"
        graph = Graph()
        links_list = []
        cost_list = []
        with open(filename, "r") as file:  # context manager closes the file even on errors
            data = file.read().splitlines()
        for link in data:
            n1, n2, distance = link.split(' ')
            links_list.append((n1, n2, float(distance)))
            cost_list.append(float(distance))

        self.sorted_costs = sorted(cost_list)
        graph.add_weighted_edges_from(links_list)
        return graph
Code Example #9
def singleSourceSP(G: nx.Graph, A_i: list):
    """Return (dist, p(node)) for every node relative to the source set A_i.

    Arguments:
        G {nx.Graph} -- the weighted graph to search
        A_i {list} -- the nodes acting as sources

    Returns:
        distances -- dict mapping each node to the distance of its nearest
                     source in A_i
        path -- dict mapping each node to its nearest source p(node)
    """

    # Attach a virtual node to every source with weight-0 edges; a single
    # Dijkstra run from it then yields the distance to the nearest source.
    # Assumes nodes are labeled 0..n-1, so len(G.nodes()) is an unused id.
    virtNodeId = len(G.nodes())
    G.add_node(virtNodeId)
    G.add_weighted_edges_from([(i, virtNodeId, 0) for i in A_i])
    distances, paths = nx.single_source_dijkstra(G, virtNodeId)
    G.remove_node(virtNodeId)

    path = {node: paths[node][1] for node in paths if node != virtNodeId}

    return distances, path
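A usage sketch on a small integer-labeled graph (the labeling the virtual-node trick requires):

import networkx as nx
G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 2.0), (1, 2, 1.0), (2, 3, 3.0)])
dist, nearest = singleSourceSP(G, [0, 3])
print(nearest[1])  # 0 -- node 1 is closer to source 0 than to source 3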
Code Example #10
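# Assumed names (not shown in the original snippet): Graph from networkx and
# the project-local helper mesh2edge_list.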
def mesh2graph(faces,
               n=1,
               ordinal=False,
               vtx_signal=None,
               weight_type=('dissimilar', 'euclidean'),
               weight_normalization=False):
    """
    create graph according to mesh's geometry and vtx_signal

    Parameters
    ----------
    faces : a array with shape (n_triangles, 3)
    n : integer
        specify which ring should be got
    ordinal : bool
        True: get the n_th ring neighbor
        False: get the n ring neighbor
    vtx_signal : numpy array
        NxM array, N is the number of vertices,
        M is the number of measurements and time points.
    weight_type : (str1, str2)
        The rule used for calculating weights
        such as ('dissimilar', 'euclidean') and ('similar', 'pearson correlation')
    weight_normalization : bool
        If it is False, do nothing.
        If it is True, normalize weights to [0, 1].
            After doing this, greater the weight is, two vertices of the edge are more related.

    Returns
    -------
    graph : nx.Graph
    """

    row_ind, col_ind, edge_data = mesh2edge_list(faces, n, ordinal, vtx_signal,
                                                 weight_type,
                                                 weight_normalization)
    graph = Graph()
    # add_weighted_edges_from is faster than from_scipy_sparse_matrix and
    # from_numpy_matrix, and also faster than the default constructor.
    # For more details, see
    # http://stackoverflow.com/questions/24681677/transform-csr-matrix-into-networkx-graph
    graph.add_weighted_edges_from(zip(row_ind, col_ind, edge_data))

    return graph
Code Example #11
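# Assumed imports (not shown in the original snippet):
#   import random
#   from typing import Optional
#   import networkx as nx
# Sampler is a project-local type; judging by next(sampler) below, it is an
# iterator of floats (assumption).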
def _weight_graph_edges(
    graph: nx.Graph,
    sampler: Sampler,
    seed: Optional[int] = None,
) -> None:  # the graph is mutated in place; nothing is returned
    """Update the weights of all the edges of a graph in place.

    Args:
        graph: The graph to mutate.
        sampler: An iterator yielding one weight per edge.
        seed: Optional seed for the random number generator.
    """
    if graph.is_multigraph():
        raise ValueError("Cannot deal with multigraphs")

    if seed is not None:
        random.seed(seed)

    weighted_edges = [(e[0], e[1], next(sampler)) for e in graph.edges]

    # If edges already present, it will effectively update them (except for multigraph)
    graph.add_weighted_edges_from(weighted_edges)
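A usage sketch with a plain iterator standing in for Sampler (imports as noted above):

g = nx.path_graph(3)                        # two edges: (0, 1) and (1, 2)
_weight_graph_edges(g, iter([0.3, 0.7]))
print(nx.get_edge_attributes(g, "weight"))  # {(0, 1): 0.3, (1, 2): 0.7}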
Code Example #12
File: NetworkParser.py Project: ayelkina/Alhe
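    # Assumed imports (not shown in the original snippet):
    #   from xml.dom import minidom
    #   from networkx import Graph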
    def load_graph_from_xml(self, filename):
        doc = minidom.parse("sndlib-networks-xml/" + filename)
        link_elements = doc.getElementsByTagName("link")

        graph = Graph()
        link_tuples = []
        cost_list = []

        for link in link_elements:
            source = link.getElementsByTagName(
                "source")[0].firstChild.wholeText
            target = link.getElementsByTagName(
                "target")[0].firstChild.wholeText
            cost = float(
                link.getElementsByTagName("additionalModules")
                [0].getElementsByTagName("addModule")[0].getElementsByTagName(
                    "cost")[0].firstChild.wholeText)

            cost_list.append(cost)
            link_tuples.append((source, target, cost))

        self.sorted_costs = sorted(cost_list)
        graph.add_weighted_edges_from(link_tuples)
        return graph
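The parser expects an SNDlib-style XML layout; a self-contained sketch of just the extraction path (the markup is abridged and illustrative):

from xml.dom import minidom
xml = ('<network><link><source>a</source><target>b</target>'
       '<additionalModules><addModule><cost>1.5</cost></addModule>'
       '</additionalModules></link></network>')
link = minidom.parseString(xml).getElementsByTagName("link")[0]
print(link.getElementsByTagName("source")[0].firstChild.wholeText)  # a
print(link.getElementsByTagName("cost")[0].firstChild.wholeText)    # 1.5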
Code Example #13
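# Assumed imports (not shown in the original snippet):
#   from networkx import Graph
#   from numpy import zeros, array
#   from numpy.random import RandomState
#   from sklearn import manifold, metrics
#   from sklearn.decomposition import PCA
#   affinity_cluster is a project-local helper.
# Note: this class uses the NetworkX 1.x API (nodes_iter, edges_iter, g.node).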
class ClusterNetwork(object):
    def __init__(self, reps):
        self.g = Graph()
        self.N = len(reps.keys())
        nodes = []
        self.lookup = {}
        self.attributes = None
        for i, r in enumerate(sorted(reps.keys())):
            self.lookup[r] = i
            if self.attributes is None:
                self.attributes = list(reps[r].attributes.keys())
            nodes.append((i, {'rep': reps[r]}))
        self.g.add_nodes_from(nodes)
        self.clusters = None

    def __iter__(self):
        for i, d in self.g.nodes_iter(data=True):
            yield d

    def __len__(self):
        return self.N

    def __getitem__(self, key):
        if isinstance(key, str):
            return self.g.node[self.lookup[key]]
        elif isinstance(key, tuple):
            return self.simMat[key]
        return self.g.node[key]

    def cluster(self, scores, cluster_method, oneCluster):
        # Clear any existing edges
        self.g.remove_edges_from(list(self.g.edges_iter(data=False)))

        if cluster_method is None:
            return
        if scores is not None:
            self.simMat = zeros((self.N, self.N))
            for k, v in scores.items():
                indOne = self.lookup[k[0]]
                indTwo = self.lookup[k[1]]
                self.simMat[indOne, indTwo] = v
                self.simMat[indTwo, indOne] = v
            self.simMat = -1 * self.simMat
        if cluster_method == 'affinity':
            true_labels = array(
                [self[i]['rep']._true_label for i in range(self.N)])
            self.clusters = affinity_cluster(self.simMat, true_labels,
                                             oneCluster)
            edges = []
            for k, v in self.clusters.items():
                for v2 in v:
                    if v2[0] == k:
                        continue
                    edges.append((k, v2[0], v2[1]))
        elif cluster_method == 'complete':
            edges = []
            for i in range(self.N):
                for j in range(i + 1, self.N):
                    edges.append((i, j, self.simMat[i, j]))
        self.g.add_weighted_edges_from(edges)
        seed = RandomState(seed=3)
        mds = manifold.MDS(n_components=2,
                           max_iter=3000,
                           eps=1e-9,
                           random_state=seed,
                           dissimilarity="precomputed",
                           n_jobs=4)
        pos = mds.fit(-1 * self.simMat).embedding_
        clf = PCA(n_components=2)
        pos = clf.fit_transform(pos)
        for i, p in enumerate(pos):
            self.g.node[i]['pos'] = p

    def calc_reduction(self):
        if self.clusters is None:
            return
        means = {}
        reverse_mapping = {}
        for k, v in self.clusters.items():
            s = 0
            for ind in v:
                reverse_mapping[ind[0]] = k
                s += ind[1]
            means[k] = s / len(v)
        for i in self.g.nodes_iter():
            clust_center = reverse_mapping[i]
            if i == clust_center:
                self.g.node[i]['HyperHypoMeasure'] = 0
                continue
            dist = self.g[i][clust_center]['weight']
            norm_dist = abs(dist - means[clust_center])
            len_diff = self[clust_center]['representation'].shape[0] - self[i][
                'representation'].shape[0]
            if len_diff < 0:
                norm_dist *= -1
            self.g.node[i]['HyperHypoMeasure'] = norm_dist
        if 'HyperHypoMeasure' not in self.attributes:
            self.attributes.append('HyperHypoMeasure')

    def get_edges(self):
        return array(self.g.edges(data=False))

    def labels(self):
        labels = list(range(len(self.g)))
        for k, v in self.clusters.items():
            for v2 in v:
                labels[v2[0]] = k
        true_labels = list()
        for i in range(len(labels)):
            true_labels.append(self[i]['rep']._true_label)
        levels = {x: i for i, x in enumerate(set(true_labels))}
        for i in range(len(true_labels)):
            true_labels[i] = levels[true_labels[i]]
        return array(labels), array(true_labels)

    def silhouette_coefficient(self):
        labels, true_labels = self.labels()
        return metrics.silhouette_score(self.simMat,
                                        labels,
                                        metric='precomputed')

    def homogeneity(self):
        labels, true_labels = self.labels()
        return metrics.homogeneity_score(true_labels, labels)

    def completeness(self):
        labels, true_labels = self.labels()
        return metrics.completeness_score(true_labels, labels)

    def v_score(self):
        labels, true_labels = self.labels()
        return metrics.v_measure_score(true_labels, labels)

    def adjusted_mutual_information(self):
        labels, true_labels = self.labels()
        return metrics.adjusted_mutual_info_score(true_labels, labels)

    def adjusted_rand_score(self):
        labels, true_labels = self.labels()
        return metrics.adjusted_rand_score(true_labels, labels)
Code Example #14
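        # Fragment from a larger module: final_wdnf, PolynomialEstimator,
        # FacilityLocation, Graph and self.X are defined elsewhere in the
        # project (not shown in this snippet).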
        logging.info('\nNumber of terms appearing in the resulting WDNF is %d.' %
                     len(final_wdnf.coefficients))
        return PolynomialEstimator(final_wdnf)

    def get_initial_point(self):
        """
        """
        return dict.fromkeys(self.X, 0.0)  # MAP TO INTEGER!


if __name__ == "__main__":
    B = Graph()
    B.add_nodes_from([1, 2, 3, 4, 5, 6], bipartite=0)
    B.add_nodes_from(['a', 'b'], bipartite=1)
    B.add_weighted_edges_from([(1, 'a', 0.5), (2, 'a', 0.25), (3, 'a', 3.0),
                               (4, 'a', 4.0), (5, 'a', 2.0), (6, 'a', 1.0),
                               (1, 'b', 1.0), (2, 'b', 1.0), (3, 'b', 1.0),
                               (4, 'b', 1.0), (5, 'b', 1.0), (6, 'b', 1.0)])
    #    print(B.get_edge_data(1, 'a')['weight'])
    #    print(B.edges(data = True))
    #    x = ['m1', 'm2', 'm3', 'm4']
    #    print(x[:4])
    newProb = FacilityLocation(B, 2)
    Y2, track2, bases2 = newProb.polynomial_continuous_greedy(0.0, 5, 10)
#    print(Y2)

# parser = argparse.ArgumentParser(description = 'Run the Continuous Greedy Algorithm',
# formatter_class = argparse.ArgumentDefaultsHelpFormatter)
# parser.add_argument('output')
# parser.add_argument('--iterations', help = 'Number of iterations in the Frank-Wolfe Algorithm')
# parser.add_argument('--num_of_samples', help = 'Number of samples generated by the Sampler Estimator')
# parser.add_argument('--degree', help = 'Order of the Taylor expansion used by the Polynomial Estimator')
Code Example #15
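# Assumed imports (not shown in the original snippet):
#   from abc import abstractmethod
#   from networkx import Graph, shortest_path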
class GraphConverted(object):
    def __init__(self, g, net):
        self._g = g
        self._net = net
        edges = [e for e in g.edges]
        w_edges = [(u, v, 1) for u, v in edges]
        self._support_g = Graph()
        self._support_g.add_weighted_edges_from(w_edges)
        w_edges = [(u, v, 0) for u, v in edges]
        self._total_bw_g = Graph()
        self._total_bw_g.add_weighted_edges_from(w_edges)

    def getNetwork(self):
        return self._g

    def getSupport(self):
        return self._support_g

    def getTotalBwNetwork(self):
        return self._total_bw_g

    def getMininetNet(self):
        return self._net

    @staticmethod
    def getNetworkPorts(net):
        ports = dict()
        links = net.links
        for link in links:
            node1, node2 = [
                intf.node.name for intf in [link.intf1, link.intf2]
            ]

            port1, port2 = [
                intf.name.split("-")[1][3:]
                for intf in [link.intf1, link.intf2]
            ]

            ports[(node1, node2)] = (port1, port2)
            ports[(node2, node1)] = (port2, port1)
        return ports

    @classmethod
    def from_mininet(cls, net):
        w_links = []
        for link in net.links:
            intf1, intf2 = link.intf1, link.intf2
            n1, n2 = intf1.node.name, intf2.node.name
            bw = 0
            if "bw" in intf1.params:
                bw = intf1.params["bw"]
            w_links.append((n1, n2, bw))
        g = Graph()
        g.add_weighted_edges_from(w_links)
        return cls(g, net)

    def find_min_path(self, source, dest, weight="weight"):
        g = self._support_g
        return shortest_path(g, source, dest, weight=weight)

    def update_path_weights(self, path, weight):
        if len(path) < 2:
            raise ValueError("the path should include at least 2 nodes")
        g = self._support_g
        g_ = self._total_bw_g
        for s, d in zip(path[:-1], path[1:]):
            g[s][d]["weight"] += weight
            g_[s][d]["weight"] += weight

    def path_rules(self, path):
        ports = self.getNetworkPorts(self._net)
        src_name = path[0]
        dst_name = path[-1]
        src = self._net.getNodeByName(src_name)
        dst = self._net.getNodeByName(dst_name)
        src_mac = src.MAC()
        dst_mac = dst.MAC()
        src_ip = src.IP()
        dst_ip = dst.IP()
        rules = dict()
        for u, v in zip(path[:-1], path[1:]):
            if u not in rules:
                rules[u] = []
            if v not in rules:
                rules[v] = []

            uport, vport = ports[(u, v)]
            rules[u].append({
                "src_ip": src_ip,
                "src_mac": src_mac,
                "dst_ip": dst_ip,
                "dst_mac": dst_mac,
                "out_port": uport
            })

            rules[v].append({
                "src_ip": dst_ip,
                "src_mac": dst_mac,
                "dst_ip": src_ip,
                "dst_mac": src_mac,
                "out_port": vport
            })
        return rules

    @abstractmethod
    def getRules(self):
        # Note: without ABCMeta as the metaclass, @abstractmethod is not enforced.
        pass
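The port extraction in getNetworkPorts leans on Mininet's interface naming convention; a standalone sketch of just that string manipulation (the name is illustrative):

intf_name = "s1-eth3"               # a typical Mininet interface name
port = intf_name.split("-")[1][3:]  # drop the "eth" prefix, keeping "3"
print(port)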
Code Example #16
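            # Fragment from a larger function: vertices_thr_FFA, edge_list, lmap,
            # method, graph, get_patch_by_LV, pdist (scipy.spatial.distance),
            # lFFA_patch_maps, idx, patch_stat and lFFA_patch_stats come from
            # the enclosing scope (not shown in this snippet).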
            edges = [(vtx, vtx_neighbor) for vtx in vertices_thr_FFA
                     for vtx_neighbor in edge_list[vtx]]
            w = method.split('_')[1]
            if w == 'weighted':
                edge_data = [
                    pdist(np.array([[lmap[i]], [lmap[j]]]))[0]
                    for i, j in edges
                ]
                max_dissimilar = np.max(edge_data)
                min_dissimilar = np.min(edge_data)
                edge_data = [
                    (max_dissimilar - dist) / (max_dissimilar - min_dissimilar)
                    for dist in edge_data
                ]
                edges = np.array(edges)
                graph.add_weighted_edges_from(
                    zip(edges[:, 0], edges[:, 1], edge_data))
            elif w == 'unweighted':
                graph.add_edges_from(edges)
            else:
                raise RuntimeError("invalid method: {}".format(method))
            patches = get_patch_by_LV(graph)
        else:
            raise RuntimeError(
                "the method - {} is not supported at present!".format(method))

        for label, patch in enumerate(patches, 1):
            lFFA_patch_maps[idx, patch] = label
        patch_stat.append(str(len(patches)))
        patch_stat.extend([str(len(patch)) for patch in patches])
        lFFA_patch_stats.append(','.join(patch_stat))
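The (max_dissimilar - dist) / (max_dissimilar - min_dissimilar) step above rescales raw dissimilarities into similarity weights in [0, 1]; a tiny numeric sketch:

import numpy as np
edge_data = np.array([0.2, 0.5, 1.0])  # raw pairwise dissimilarities
mx, mn = edge_data.max(), edge_data.min()
print((mx - edge_data) / (mx - mn))    # [1.    0.625 0.   ]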
Code Example #17
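        # Fragment: lines, target_partitions, movies, users, ratings and n are
        # defined earlier in the original script (not shown in this snippet).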
        for line in lines:
            line = line.split('::')
            movie = int(line[0])
            genre = line[-1].split('|')[0].strip()
            if genre in target_partitions:
                target_partitions[genre].add(movie)
                # print(target_partitions[genre])
            else:
                target_partitions[genre] = {movie}
                # print(target_partitions[genre])
    logging.info('...done.')

    B = Graph()
    B.add_nodes_from(movies, bipartite=0)
    B.add_nodes_from(users, bipartite=1)
    B.add_weighted_edges_from(ratings)

    user_degrees = dict(
        list(
            B.degree([
                node for node, d in B.nodes(data=True) if d['bipartite'] == 1
            ])))
    descending_degrees = sorted(user_degrees.values(), reverse=True)
    degree_values = list(user_degrees.values())  # dict views are not indexable in Python 3
    user_indices = sorted(range(1, len(degree_values) + 1),
                          key=lambda k: degree_values[k - 1],
                          reverse=True)
    top_n_user_indices = user_indices[:n]
    movies_sub = list(B.neighbors(top_n_user_indices[0]))[:n]

    B = B.subgraph(top_n_user_indices + movies_sub).copy()
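The index juggling above just ranks users by degree; the same idea on toy data:

user_degrees = {"u1": 3, "u2": 5, "u3": 1}
print(sorted(user_degrees, key=user_degrees.get, reverse=True))  # ['u2', 'u1', 'u3']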
Code Example #18
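            # Fragment: coordinates_list, items, dists, count and dis_dic are
            # built earlier in the original script (not shown), as are the
            # imports of Graph (networkx) and combinations (itertools).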
            # Ensures no duplicates (e.g. 1->2 & 2->1) and no 0 distances (e.g. 1->1, 2->2, etc.)
            dis_dic[count] = dists  # Serial number = key
            count += 1

G = Graph()

for idx, val in enumerate(coordinates_list):
    G.add_node(idx, pos=val)

# All unordered pairs of node indices, used to link every point to every other
mix = list(combinations(range(G.number_of_nodes()), 2))

G.add_edges_from(mix)

# Attach each precomputed distance to its point pair as the edge weight
# (dis_dic's keys are the serial numbers 0..len(mix) - 1)
for ind in dis_dic:
    G.add_weighted_edges_from([(mix[ind][0], mix[ind][1], dis_dic[ind])])

tour = [0] * 31
count = 0
county_nums = [0] * 31
p = 0
prev_node = 0

while 0.0 in tour:
    cost = 10000
    # Greedy nearest-neighbor step: follow the cheapest edge out of node p
    for i, j in G.adj[p].items():
        if j['weight'] < cost:
            cost = j['weight']
            county_nums[count] = i
            tour[count] = items[i][0]
            p = i
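The while loop implements a greedy nearest-neighbor tour step (the snippet is truncated in the original); the core move, self-contained:

import networkx as nx
G = nx.Graph()
G.add_weighted_edges_from([(0, 1, 4.0), (0, 2, 1.5), (1, 2, 2.0)])
p = 0
nxt = min(G.adj[p].items(), key=lambda kv: kv[1]["weight"])[0]
print(nxt)  # 2 -- the cheapest neighbor of node 0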