def __init__(self,graph,roots,max_hop=10,tsfr_max=25,grbpath=None):
     """
     """
     suffix = datetime.datetime.now().isoformat().replace(':','-').replace('.','-')
     suffix = ''
     self.tmp = grbpath+"gurobi/"+suffix+"-"
     self.edges = list(graph.edges())
     self.nodes = list(graph.nodes())
     print("Number of edges:",len(self.edges))
     self.hindex = [i for i,node in enumerate(self.nodes) if node not in roots]
     self.tindex = [i for i,node in enumerate(self.nodes) if node in roots]
     self.A = nx.incidence_matrix(graph,nodelist=self.nodes,
                                  edgelist=self.edges,oriented=True)
     self.I = nx.incidence_matrix(graph,nodelist=self.nodes,
                                  edgelist=self.edges,oriented=False)
     COST = nx.get_edge_attributes(graph,name='cost')
     LENGTH = nx.get_edge_attributes(graph,name='length')
     LOAD = nx.get_node_attributes(graph,name='load')
     self.c = np.array([1e-3*COST[e] for e in self.edges])
     self.l = np.array([1e-3*LENGTH[e] for e in self.edges])
     self.p = np.array([1e-3*LOAD[self.nodes[i]] for i in self.hindex])
     self.model = grb.Model(name="Get Spiders")
     self.model.ModelSense = grb.GRB.MINIMIZE
     self.__variables()
     self.__radiality()
     self.__heuristic(M=max_hop)
     self.__powerflow(M=tsfr_max)
     self.__objective()
     self.model.write(self.tmp+"secondary.lp")
     self.optimal_edges = self.__solve()
     return
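For reference, a minimal sketch (standard networkx behavior on an assumed toy graph) of the two incidence matrices built above: oriented=True puts -1 at an edge's tail and +1 at its head, while oriented=False puts 1 at both endpoints.

import networkx as nx

G = nx.path_graph(3)                                  # edges (0,1) and (1,2)
A = nx.incidence_matrix(G, oriented=True).toarray()   # [[-1, 0], [1, -1], [0, 1]]
I = nx.incidence_matrix(G, oriented=False).toarray()  # [[ 1, 0], [1,  1], [0, 1]]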
def test_line_graph_batch():
    """
    Tests that `line_graph` works with multiple graphs in a batch.
    """
    # Arrange.
    # Create test graphs.
    graph1 = nx.house_graph()
    incidence1 = nx.incidence_matrix(graph1).toarray()
    # We will have to pad the first incidence matrix with two columns,
    # because the second graph has two extra edges.
    incidence1 = np.pad(incidence1, ((0, 0), (0, 2)))
    graph2 = nx.house_x_graph()
    incidence2 = nx.incidence_matrix(graph2).toarray()

    # Act.
    # Get the line graph for both graphs individually, and both together
    # as a batch.
    graph1_line = convolution.line_graph(incidence1)
    graph2_line = convolution.line_graph(incidence2)

    batch = np.stack([incidence1, incidence2], axis=0)
    line_graph_batch = convolution.line_graph(batch)

    # Assert.
    # Both ways of doing it should produce the same results.
    np.testing.assert_array_equal(graph1_line, line_graph_batch[0])
    np.testing.assert_array_equal(graph2_line, line_graph_batch[1])
Example #3
    def phase_dev(self, phase):
        D = (nx.incidence_matrix(self.G, oriented=True,
                                 weight='weight')).todense()  #incidence
        N = np.random.normal(0, 20, [len(D[0]), 1])

        # How to handle Rs
        #rsq = self.r_states[:,-1] * self.r_states[:,-1].T

        #How to bring phases together; this is the intrinsic alpha mode
        bring_in = self.w - np.multiply(
            self.r_states[:, -1],
            self.K / len(self.G) * D * np.sin(D.T * self.states[:, -1])) + N

        # Control is done HERE
        D_ctrl = (nx.incidence_matrix(self.G_ctrl,
                                      oriented=True,
                                      weight='weight')).todense()
        #bring_out = self.K_u / len(self.G) * D_ctrl * np.cos(D_ctrl.T * self.states[:,-1])
        bring_stim = self.K_u / len(self.G) * D_ctrl * np.sin(
            D_ctrl.T * self.states[:, -1]
        ) + D_ctrl * 200 * D_ctrl.T * np.ones_like(self.states[:, -1])

        # Inputs are done HERE
        # TREAT STIM LIKE A "PATHOLOGY FIXER" to let the brain's intrinsic dynamics do their thing.
        # So, basically, INPUTS should be desyncing and STIM should be pure *blocking*
        D_inp = (nx.incidence_matrix(self.G_inp,
                                     oriented=True,
                                     weight='weight')).todense()
        input_out = self.K_i / len(self.G) * D_inp * np.cos(
            D_inp.T * self.states[:, -1])

        return bring_in + bring_stim + input_out + N
    def _evolve(self, tarr, initguess=None):
        """
        Evolves the flow network from `initguess` by timesteps in `tarr`
        """
        M = nx.incidence_matrix(self, oriented=True).toarray()
        Mw = nx.incidence_matrix(self, oriented=True, weight=self.weight_attr).toarray()
        P = np.array([self.node[n]['input'] for n in self.nodes()])

        if initguess is None:
            initguess = _random_stableop_initguess(self.number_of_nodes())

        return odeint(_kuramoto_ode, initguess, t=tarr, args=(M, Mw, P))
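The helper _kuramoto_ode is not shown here. A minimal plausible form consistent with the (M, Mw, P) arguments (an assumption, not the original implementation) is:

import numpy as np

def _kuramoto_ode(theta, t, M, Mw, P):
    # Assumed form: standard weighted Kuramoto dynamics in incidence-matrix notation,
    # dtheta_i/dt = P_i + sum_j w_ij * sin(theta_j - theta_i).
    # t is unused but required by odeint's callback signature.
    return P - Mw @ np.sin(M.T @ theta)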
Example #5
    def build_incidence_matrices(self):
        nodelist = self.node_list
        tree_edgelist = self.span_tree
        chordes_edgelist = self.chordes

        A_full = nx.incidence_matrix(self.span_tree, nodelist=nodelist, edgelist=tree_edgelist, oriented=True)
        A_full = -1 * A_full.toarray()
        A_truncated = A_full[:-1]

        A_chordes_full = nx.incidence_matrix(self.chordes, nodelist=nodelist, edgelist=chordes_edgelist, oriented=True)
        A_chordes_full = -1 * A_chordes_full.toarray()
        A_chordes_truncated = A_chordes_full[:-1]
        return A_truncated, A_chordes_truncated
Example #6
    def _evolve(self, tarr, initguess=None):
        """
        Evolves the flow network from `initguess` by timesteps in `tarr`
        """
        M = nx.incidence_matrix(self, oriented=True).toarray()
        Mw = nx.incidence_matrix(self, oriented=True,
                                 weight=self.weight_attr).toarray()
        P = np.array([self.node[n]['input'] for n in self.nodes()])

        if initguess is None:
            initguess = _random_stableop_initguess(self.number_of_nodes())

        return odeint(_kuramoto_ode, initguess, t=tarr, args=(M, Mw, P))
Example #7
def test_graph_convolution():
    # Define convolution layer
    node_in, node_out = np.random.randint(1, 10, size=2)
    edge_in, edge_out = np.random.randint(1, 10, size=2)
    gconv = GraphConvolution(node_in, node_out, edge_in, edge_out)
    gconv.to(device)

    # Define graph
    num_nodes = 10
    p = 0.5
    batch_size = 4
    G = nx.fast_gnp_random_graph(num_nodes, p, directed=True)
    A = nx.adjacency_matrix(G).todense()
    B = nx.incidence_matrix(G, oriented=True).T.todense()
    V_size = (batch_size, G.number_of_nodes(), node_in)
    E_size = (batch_size, G.number_of_edges(), edge_in)

    # Create torch tensors
    A = torch.tensor(A, dtype=float)
    B = torch.tensor(B, dtype=float)
    V = torch.randn(V_size, dtype=float)
    E = torch.randn(E_size, dtype=float)

    # Forward pass
    Vout, Eout = gconv(A, B, V, E)

    # Assert output shapes are correct
    assert Vout.shape == (batch_size, G.number_of_nodes(), node_out)
    assert Eout.shape == (batch_size, G.number_of_edges(), edge_out)
def simulate_edge_flows2(G, weighted=False):
    """ Like simulate_edge_flows but uses QR decomposition
    instead of matrix inversion
    """
    if weighted:
        wts = []
        for u, v, d in G.edges_iter(data=True):
            d['cond'] = np.sqrt(d['conductivity']**4/d['weight'])
            wts.append(d['cond'])

        wts = np.array(wts)
        wts_sqr = wts**2
    else:
        for u, v, d in G.edges_iter(data=True):
            d['cond'] = 1.
    
    # construct reduced incidence matrix
    d1 = nx.incidence_matrix(G, oriented=True,
        weight='cond').toarray()[1:, :]
    
    Q, _ = np.linalg.qr(d1.T)
    A = np.dot(Q, Q.T)
    
    if weighted:
        A = (wts)*(A*wts.T).T

    diag_inds = np.diag_indices(A.shape[0])

    if weighted:
        A[diag_inds] = wts_sqr - A[diag_inds]
    else:
        A[diag_inds] = 1 - A[diag_inds]
    return A
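A quick sanity check (connected toy graph assumed) that the QR-based projector Q Q^T agrees with the pseudoinverse route used in simulate_edge_flows:

import numpy as np
import networkx as nx

G = nx.cycle_graph(4)
d1_full = nx.incidence_matrix(G, oriented=True).toarray()
d1 = d1_full[1:, :]                       # reduced incidence matrix

Q, _ = np.linalg.qr(d1.T)
P_qr = Q @ Q.T                            # projector onto the row space of d1

L = d1_full @ d1_full.T                   # graph Laplacian
P_pinv = d1_full.T @ np.linalg.pinv(L) @ d1_full

assert np.allclose(P_qr, P_pinv)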
Example #9
def reoptimize_network(G,
                       N,
                       gamma,
                       index_source_node,
                       sigma,
                       K=1,
                       threshold=1e-6):
    """Take the given graph G which is assumed to be a minimum dissipation network for a set of parameters,
    now using different parameters to again optimize the underlying network for the new parameters.
    The method used to find capacities that locally minimize the network dissipation was adapted from
    
    F.Corson, "Fluctuations and Redundancy in Optimal Transport Networks", Physical Review Letters (4), 048703, 2010
    """
    nof_nodes = len(list(G.nodes()))

    correlation_matrix_sources = np.ones((nof_nodes, nof_nodes))
    np.fill_diagonal(correlation_matrix_sources, 1 + sigma**2)
    correlation_matrix_sources[
        index_source_node, :] = -(nof_nodes - 1) - sigma**2
    correlation_matrix_sources[:,
                               index_source_node] = -(nof_nodes - 1) - sigma**2
    correlation_matrix_sources[
        index_source_node,
        index_source_node] = (nof_nodes - 1)**2 + (nof_nodes - 1) * sigma**2
    capacities = np.array([
        G[list(G.edges())[i][0]][list(G.edges())[i][1]]['weight']
        for i in range(len(G.edges()))
    ])
    capacities /= (np.sum(capacities**gamma) / K**gamma)

    last_change = 1e5
    iterations = 0
    while last_change > threshold:
        #if iterations % 10==0:
        #    print('Iterations ' + str(iterations))
        #    print('Last change ' +str(last_change))
        line_capacities = np.array([G[u][v]['weight'] for u, v in G.edges()])
        L = nx.laplacian_matrix(G).A
        B = np.linalg.pinv(L)
        I = nx.incidence_matrix(G, oriented=True).A
        flow_correlations = np.linalg.multi_dot([
            np.diag(line_capacities), I.T, B, correlation_matrix_sources, B, I,
            np.diag(line_capacities)
        ])
        new_line_capacities = np.zeros(len(G.edges()))
        for i in range(len(flow_correlations)):
            new_line_capacities[i] = flow_correlations[i, i]**(
                1 / (1 + gamma)) / (np.sum(
                    np.diag(flow_correlations)
                    **(gamma / (1 + gamma))))**(1 / gamma) * K
        new_cap_dict = {
            list(G.edges())[i]: new_line_capacities[i]
            for i in range(len(G.edges()))
        }
        nx.set_edge_attributes(G, new_cap_dict, 'weight')
        last_change = np.sum((new_line_capacities - line_capacities)**2)
        iterations += 1
    return G
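A hypothetical usage sketch; the graph, parameter values, and the 'weight' attribute are assumptions chosen to match what the function reads:

import numpy as np
import networkx as nx

G = nx.complete_graph(6)
for u, v in G.edges():
    G[u][v]['weight'] = np.random.uniform(0.5, 1.5)   # initial capacities

# Iterates until the squared change in capacities falls below threshold.
G_opt = reoptimize_network(G, N=len(G), gamma=0.5, index_source_node=0, sigma=1.0)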
Example #10
def calc_average_dissipation(G, sigma, mu, index_source_node):
    """Calculate average dissipation for the network encoded by G"""

    line_capacities = np.array([G[u][v]['weight'] for u, v in G.edges()])

    N = len(G.nodes())

    correlation_matrix_sources = np.ones((N, N))
    np.fill_diagonal(correlation_matrix_sources, mu**2 + sigma**2)
    correlation_matrix_sources[
        index_source_node, :] = -(N - 1) * mu**2 - sigma**2
    correlation_matrix_sources[:,
                               index_source_node] = -(N - 1) * mu**2 - sigma**2
    correlation_matrix_sources[
        index_source_node,
        index_source_node] = (N - 1)**2 * mu**2 + (N - 1) * sigma**2

    L = nx.laplacian_matrix(G).A
    B = np.linalg.pinv(L)
    I = nx.incidence_matrix(G, oriented=True).A
    flow_correlations = np.linalg.multi_dot([
        np.diag(line_capacities), I.T, B, correlation_matrix_sources, B, I,
        np.diag(line_capacities)
    ])
    dissipation = np.sum(np.diag(flow_correlations) / line_capacities)
    return dissipation
Example #11
def test_line_5():
    G = nx.path_graph(5)
    C = np.asarray(nx.incidence_matrix(G).todense())
    H = hg.Hypergraph(C)
    Ch = H.incidence_matrix()
    Ce = np.array([[1,1,1,0,0],[0,0,1,1,1]]).T
    assert np.all(Ce == Ch)
def run_experiment(G, h, step_count, eps, d_update):
    all_edges_fine = False
    A = sp.sparse.csc_matrix(
        nx.incidence_matrix(G, nodelist=G.nodes(), oriented=True))
    g = np.array([G[v][w][ID]['battery'] for (v, w, ID) in G.edges(keys=True)])
    b = np.array([G.node[v]['demand'] for v in G.nodes()])
    i = 0
    node_dict = {}
    for node in G.nodes():
        node_dict[node] = i
        i += 1
    i = 0
    arc_dict = {}
    for arc in G.edges(keys=True):
        arc_dict[arc] = i
        i += 1
    pi = np.zeros(G.number_of_nodes())
    q = np.zeros(G.number_of_edges())

    while not all_edges_fine:
        R = sp.sparse.diags([[
            G[a[0]][a[1]][a[2]]['s'] / G[a[0]][a[1]][a[2]]['x']
            for a in G.edges(keys=True)
        ]], [0])
        L = A.dot(sp.sparse.linalg.inv(R).dot(A.T))
        rhs = b - A.dot(sp.sparse.linalg.inv(R).dot(g))

        compute_electrical_flow_general(G, node_dict, arc_dict, g, b, A, R, L,
                                        rhs, pi, q)
        step(G, h, d_update)
        all_edges_fine = True
        for a in G.edges(keys=True):
            v, w, ID = a
            if not (G[v][w][ID]['x'] <= eps or G[v][w][ID]['x'] >= 1 - eps):
                all_edges_fine = False
Example #13
def setupMatrix1():
    nodes = [0, 1, 2, 3, 4, 5, 6, 7]
    edges = [
        [0, 1],
        [0, 6],
        [0, 7],
        [1, 2],
        [1, 7],
        [2, 1],
        [2, 7],
        [3, 5],
        [3, 7],
        [4, 5],
        [5, 6],
        [6, 5],
        [7, 6],
    ]

    G = nx.DiGraph()
    G.add_nodes_from(nodes)
    G.add_edges_from(edges)

    incidence_matrix = -nx.incidence_matrix(G, oriented=True)

    return np.array(incidence_matrix.toarray())
Example #14
    def _makeg(self, N, K, E):
        """
        create G matrix for inequality constraints

        (K + 4) * N + 2 * K + 2 * E + 1 rows
        """
        adj = np.array(
            nx.adjacency_matrix(G=self.graph, nodelist=list(
                range(N))).todense()) + np.eye(N)
        inc = np.array(
            nx.incidence_matrix(G=self.graph,
                                nodelist=list(range(N))).todense())

        net_apx = nx.random_geometric_graph(self.graph.order(),
                                            self.jam_radius,
                                            pos=self.pos)
        apx = np.array(
            nx.adjacency_matrix(G=net_apx, nodelist=list(
                range(N))).todense()) + np.eye(N)
        G = np.concatenate(
            (self._makeg1(N, K), self._makeg2(N, K), self._makeg3(
                N, K), self._makeg4(N, K, apx), self._makeg5(
                    N, K, E, inc), self._makeg6(N, K), self._makeg7(N, K)),
            axis=0)
        return G
Example #15
def hardy_cross(G, n, init_guess=None):
    cycles = nx.cycle_basis(G)
    cycles = np.array([[tuple([cycles[j][i], cycles[j][i+1]]) if (i < len(cycles[j])-1) else tuple([cycles[j][i], cycles[j][0]]) for i in range(len(cycles[j]))] for j in range(len(cycles))])

    L = [G.node[i]['demand'] for i in G.node.keys()]
    edges = np.array(G.edges())
    edge_idx = np.full((len(G), len(G)), 9999, dtype=int)
    edge_idx[edges[:,0], edges[:,1]] = np.arange(len(G.edges()))
    edge_idx[edges[:,1], edges[:,0]] = np.arange(len(G.edges()))
    
    edge_dir = np.zeros((len(G), len(G)), dtype=int)
    edge_dir[edges[:,0], edges[:,1]] = 1
    edge_dir[edges[:,1], edges[:,0]] = -1

    if init_guess is None:
        init_guess = np.linalg.lstsq(nx.incidence_matrix(G, oriented=True).toarray(), L)[0]
    A = init_guess.copy().astype(float)
    for i in range(n):
        for u in cycles:
            R = np.array([G[j[0]][j[1]]['weight'] for j in u])
            D = np.array(edge_dir[u[:,0], u[:,1]])
            C = np.array(A[edge_idx[u[:,0], u[:,1]]])
            C = C*D
            dV = (R*C).sum()
            di = dV/R.sum()
            C = (C - di)*D
            A[edge_idx[u[:,0], u[:,1]]] = C
    return A
Example #16
def hydraulics_known_flows_wo_loops_prop_to_edges(G, m_node):
    A = nx.incidence_matrix(G, oriented=True).todense()
    A = A[1:,:]
    m_node = m_node[1:]
    flows = np.linalg.solve(A, m_node)
    G = properties_to_edges(G, {'mass_flows': flows})
    return flows
Example #17
def hydraulics_known_flows_wo_loops_sparse(G, m_node):
    import scipy.sparse.linalg
    A = nx.incidence_matrix(G, oriented=True)
    A = A[1:,:]
    m_node = m_node[1:]
    flows = scipy.sparse.linalg.spsolve(A, m_node)
    return flows
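Both solvers above rely on the fact that for a tree the oriented incidence matrix with one row dropped is square and invertible (its determinant is +1 or -1). A minimal check on an assumed toy tree:

import numpy as np
import networkx as nx

T = nx.path_graph(6)                                # a path is a tree: 6 nodes, 5 edges
A = nx.incidence_matrix(T, oriented=True).toarray()
A_red = A[1:, :]                                    # square (n-1) x (n-1) system
assert A_red.shape[0] == A_red.shape[1]
assert np.isclose(abs(np.linalg.det(A_red)), 1.0)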
Example #18
    def __init__(self, K=2, start_state=None):
        super(KNet, self).__init__(dt=0.0001)

        #self.K = K
        #Kt is the timeseries of global connectivity
        self.Kt = np.random.normal(0.1, 0.01, size=self.tvect.shape)
        half_pt = int(np.floor(self.Kt.shape[0] / 3))
        self.Kt[half_pt:] += K

        #self.K is a static parameter used to construct the L
        # Should probably change the names and split these out
        self.K = 6

        self.make_L_struct()
        self.set_connectivity()

        self.D = (nx.incidence_matrix(self.G, oriented=True,
                                      weight='weight')).todense()

        if start_state is None:
            self.state = np.random.normal(0, np.pi, (self.N * self.R, 1))
        else:
            self.state = start_state

        self.w = rand.normal(5, 1, size=self.state.shape) * 4
        self.dynamics = lambda D, x, k: self.w - k * D * np.sin(D.T * x)
        self.post_dynamics = lambda x: x % (2 * np.pi)
Example #19
def net_flow(G, efficiency='speed'):
    if not nx.is_connected(G):
        raise DisconnectedGraphError

    L = nx.laplacian_matrix(G, weight=None).toarray()
    C = np.zeros(L.shape)
    C[1:, 1:] = np.linalg.inv(L[1:, 1:])

    N = G.number_of_nodes()
    E = G.number_of_edges()
    B = nx.incidence_matrix(G, oriented=True).T.toarray()  # shape = (edges, nodes)

    if efficiency == 'memory':
        values = np.zeros(G.number_of_edges())
        for idx, B_row in enumerate(B):
            F_row = B_row @ C
            rank = rankdata(F_row)
            values[idx] = np.sum((2 * rank - 1 - N) * F_row)
    elif efficiency == 'speed':
        F = B @ C
        F_ranks = np.apply_along_axis(rankdata, arr=F, axis=1)
        values = np.sum((2 * F_ranks - 1 - N) * F, axis=1)
    else:
        raise Exception("Efficiency unknown.")

    edge_dict = dict(zip(G.edges, values))
    return edge_dict
Example #20
def hydraulics_known_flows_wo_loops(G, m_node):
    A = nx.incidence_matrix(G, oriented=True).todense()
    m_node[0] = - np.sum(m_node[1:])
    print(m_node.shape)
    print(A.shape)
    flows = np.linalg.lstsq(A,m_node)[0]
    return flows
Example #21
def powerflow(graph):
    """
    Checks the power flow solution and saves a dictionary of voltages.
    """
    A = nx.incidence_matrix(graph,
                            nodelist=list(graph.nodes()),
                            edgelist=list(graph.edges()),
                            oriented=True).toarray()

    node_ind = [i for i,node in enumerate(graph.nodes()) \
                if graph.nodes[node]['label'] != 'S']
    nodelist = [node for node in list(graph.nodes()) \
                if graph.nodes[node]['label'] != 'S']
    edgelist = [edge for edge in list(graph.edges())]
    nodeload = nx.get_node_attributes(graph, 'load')

    # Resistance data
    edge_r = nx.get_edge_attributes(graph, 'r')
    R = np.diag([1.0/edge_r[e] if e in edge_r else 1.0/edge_r[(e[1],e[0])] \
         for e in list(graph.edges())])
    G = np.matmul(np.matmul(A, R), A.T)[node_ind, :][:, node_ind]
    p = np.array([1e-3 * nodeload[n] for n in nodelist])

    # Voltages and flows
    v = np.matmul(np.linalg.inv(G), p)
    f = np.matmul(np.linalg.inv(A[node_ind, :]), p)
    voltage = {h: 1.0 - v[i] for i, h in enumerate(nodelist)}
    flows = {e: log(abs(f[i])) for i, e in enumerate(edgelist)}
    subnodes = [node for node in list(graph.nodes()) \
                if graph.nodes[node]['label'] == 'S']
    for s in subnodes:
        voltage[s] = 1.0
    nx.set_node_attributes(graph, voltage, 'voltage')
    nx.set_edge_attributes(graph, flows, 'flow')
    return
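A small numeric sanity check of the relations above on a hypothetical two-bus feeder (the label/load/r attribute names follow the function):

import networkx as nx

g = nx.Graph()
g.add_node('S1', label='S', load=0.0)
g.add_node('H1', label='H', load=1000.0)   # 1 kW load -> p = 1.0 after the 1e-3 scaling
g.add_edge('S1', 'H1', r=0.05)

powerflow(g)
# Expected: voltage at H1 is 1.0 - r*p = 0.95; the substation S1 stays at 1.0.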
Example #22
 def test_empty_graph(self, n_vertices=11):
     """Empty graphs have either no edge, or self-loops only. The Laplacian
     doesn't see self-loops, as the gradient on those edges is always zero.
     """
     adjacencies = [
         np.zeros((n_vertices, n_vertices)),
         np.identity(n_vertices),
     ]
     for adjacency, n_edges in zip(adjacencies, [0, n_vertices]):
         graph = graphs.Graph(adjacency)
         self.assertEqual(graph.n_vertices, n_vertices)
         self.assertEqual(graph.n_edges, n_edges)
         self.assertEqual(graph.W.nnz, n_edges)
         for laplacian in ['combinatorial', 'normalized']:
             graph.compute_laplacian(laplacian)
             self.assertEqual(graph.L.nnz, 0)
             sources, targets, weights = graph.get_edge_list()
             self.assertEqual(len(sources), n_edges)
             self.assertEqual(len(targets), n_edges)
             self.assertEqual(len(weights), n_edges)
             graph.compute_differential_operator()
             self.assertEqual(graph.D.nnz, 0)
             graph.compute_fourier_basis()
             np.testing.assert_allclose(graph.U, np.identity(n_vertices))
             np.testing.assert_allclose(graph.e, np.zeros(n_vertices))
         # NetworkX uses the same conventions.
         G = nx.from_scipy_sparse_matrix(graph.W)
         self.assertEqual(nx.laplacian_matrix(G).nnz, 0)
         self.assertEqual(nx.normalized_laplacian_matrix(G).nnz, 0)
         self.assertEqual(nx.incidence_matrix(G).nnz, 0)
Example #23
    def __init__(self, *args, **kwargs):
        super(UniformityOfSamplerForUniformSpanningTree,
              self).__init__(*args, **kwargs)

        # Sample a connected Erdos-Renyi graph
        n, p = 5, 0.4
        nb_st_min, nb_st_max = 5, 10

        it_max = 100
        for _ in range(it_max):

            g = erdos_renyi_graph(n, p)

            if is_connected(g):
                A = incidence_matrix(g, oriented=True)[:-1, :].toarray()

                potential_st = itt.combinations(range(g.number_of_edges()),
                                                n - 1)
                list_st = [
                    st for st in potential_st if det_ST(A, range(n - 1), st)
                ]

                if nb_st_min <= len(list_st) <= nb_st_max:
                    break
        else:
            raise ValueError('No satisfactory Erdos-Renyi graph found')

        self.nb_spanning_trees = len(list_st)

        self.ust = UST(g)

        self.nb_samples = 1000
Example #24
def hardy_cross(G, n, init_guess=None):
    cycles = nx.cycle_basis(G)
    cycles = np.array([[
        tuple([cycles[j][i], cycles[j][i + 1]]) if
        (i < len(cycles[j]) - 1) else tuple([cycles[j][i], cycles[j][0]])
        for i in range(len(cycles[j]))
    ] for j in range(len(cycles))])

    L = [G.node[i]['demand'] for i in G.node.keys()]
    edges = np.array(G.edges())
    edge_idx = np.full((len(G), len(G)), 9999, dtype=int)
    edge_idx[edges[:, 0], edges[:, 1]] = np.arange(len(G.edges()))
    edge_idx[edges[:, 1], edges[:, 0]] = np.arange(len(G.edges()))

    edge_dir = np.zeros((len(G), len(G)), dtype=int)
    edge_dir[edges[:, 0], edges[:, 1]] = 1
    edge_dir[edges[:, 1], edges[:, 0]] = -1

    if init_guess is None:
        init_guess = np.linalg.lstsq(
            nx.incidence_matrix(G, oriented=True).toarray(), L)[0]
    A = init_guess.copy().astype(float)
    for i in range(n):
        for u in cycles:
            R = np.array([G[j[0]][j[1]]['weight'] for j in u])
            D = np.array(edge_dir[u[:, 0], u[:, 1]])
            C = np.array(A[edge_idx[u[:, 0], u[:, 1]]])
            C = C * D
            dV = (R * C).sum()
            di = dV / R.sum()
            C = (C - di) * D
            A[edge_idx[u[:, 0], u[:, 1]]] = C
    return A
Example #25
    def getRho(self, points, exp=2):
        if self._cFlag:
            numer = self.getEpsilon(points, exp).sum()

            A = nx.incidence_matrix(self.G, oriented=True).toarray()
            m, n = A.shape
            b = np.zeros(m - 1)
            b[0] = -1

            _, inds = sp.Matrix(A[:m - 1]).rref()

            mask = np.ones(n, dtype=bool)
            mask[list(inds)] = False
            B = A[:m - 1, ~mask]
            N = A[:m - 1, mask]
            H = np.append(-(np.linalg.inv(B) @ N),
                          np.identity(n - (m - 1)),
                          axis=0)
            b_H = np.append(-np.linalg.inv(B) @ b, np.zeros(n - (m - 1)))
            z = np.array(points)[:, mask]
            denom = (((H @ z.T).T - b_H)**2).sum()
            rho = max(1 - numer / denom, 0)
            return rho

        else:
            print('Edge weights undefined')
def LESS(signal, G,  rho, weightFactors):

    epsilon = 1e-5
    n = len(G.nodes())
    ## LESS
    current_obj = -np.inf
    current_sol = None
    inci = nx.incidence_matrix(G, oriented=True).T
    for t in range(1, n+1):
        x_opt = cvx.Variable(n)
        y = signal
        v = inci * x_opt
        obj = x_opt.T * y / np.sqrt(t)
        constraints = [x_opt >= 0,
                       x_opt <= 1,
                       cvx.sum(x_opt) <= t,
                       cvx.norm(cvx.pos(v), 1) <= rho]
        prob = cvx.Problem(cvx.Maximize(obj), constraints)
        #try:
        prob.solve(solver=cvx.SCS)
        #except:
        #    print("Cannot solve! because: ", prob.status)
        #    continue

        if obj.value > current_obj:
            current_obj = obj.value
            current_sol = x_opt.value
    
    if current_sol is not None:
        LESS_subset = np.where(current_sol > epsilon)[0].tolist()
        LESS_sol    = (LESS_subset, cal_removal_loss(LESS_subset, G, weightFactors))
    else:
        LESS_sol = (None, rho)
    return LESS_sol
Example #27
def generatePSDF(G, PTDF):
	nodelist= sorted(G.nodes())
	edgelist= list(G.edges())
	edgelist.sort(key=sortKeyWith2ndThen1stListValue) # sort by the second node, to match the xls layout
	A=nx.incidence_matrix(G,nodelist, edgelist,True)
	A=A.toarray()
	A=A.transpose() # networkx uses the transposed convention
	A=np.asmatrix(A)
	A=-A
	# Read susceptances
	X=np.zeros((len(edgelist),len(edgelist)))
	#B=np.zeros((len(edgelist),len(edgelist)))
	for i in range(0,len(edgelist)):
		X[i,i]=G.edges[edgelist[i][0], edgelist[i][1]]['weight']

	B=X.copy()
	B[B!=0]=-1/B[B!=0] # invert reactances to get susceptances
	B=np.asmatrix(B)
	
	Aux1=(B*A)
	Aux2=B

	PSDF=Aux2-PTDF*Aux1.transpose()
	#import pdb;pdb.set_trace()
	return PSDF
def effective_resistance_project(G, beacons):
    from numpy.linalg import pinv
    projection = np.zeros((G.number_of_nodes() - len(beacons), len(beacons)))
    L = nx.laplacian_matrix(G)
    B = nx.incidence_matrix(G).T.todense()
    B_e = B.copy()
    
    L_pseudo = pinv(L.todense())
    for i in range(B.shape[0]):
        min_ace = np.min(np.where(B[i,:] ==1)[1])
        B_e[i, min_ace] = -1
    
    for i,beacon in enumerate(beacons):
        node_index = 0
        for j,node in enumerate(G.nodes()):
            if node in beacons:
                continue
                
            battery = np.zeros((B_e.shape[1],1))
            battery[i] = 1
            battery[node_index] = -1

            p = L_pseudo * battery
            projection[node_index][i] = abs(p[i] - p[j])
            node_index += 1 
    return projection
Example #29
 def test_incidence_nx(graph):
     r"""Test that the incidence matrix corresponds to NetworkX."""
     incidence_pg = np.sign(graph.D.toarray())
     G = nx.OrderedDiGraph if graph.is_directed() else nx.OrderedGraph
     graph_nx = nx.from_scipy_sparse_matrix(graph.W, create_using=G)
     incidence_nx = nx.incidence_matrix(graph_nx, oriented=True)
     np.testing.assert_equal(incidence_pg, incidence_nx.toarray())
 def run_pf(self):
     """
     Checks power flow solution and plots the voltage at different nodes in the 
     network through colorbars.
     """
     
     A = nx.incidence_matrix(self.dist_net,nodelist=list(self.dist_net.nodes()),
                             edgelist=list(self.dist_net.edges()),oriented=True).toarray()
     
     nodelabel = nx.get_node_attributes(self.dist_net,'label')
     nodeload = nx.get_node_attributes(self.dist_net,'load')
     node_ind = [i for i,node in enumerate(self.dist_net.nodes()) \
                 if nodelabel[node] != 'S']
     nodelist = [node for node in list(self.dist_net.nodes()) if nodelabel[node] != 'S']
     edgelist = [edge for edge in list(self.dist_net.edges())]
     
     # Resistance data
     edge_r = nx.get_edge_attributes(self.dist_net,'r')
     R = np.diag([1.0/edge_r[e] if e in edge_r else 1.0/edge_r[(e[1],e[0])] \
          for e in list(self.dist_net.edges())])
     G = np.matmul(np.matmul(A,R),A.T)[node_ind,:][:,node_ind]
     
     for d in range(24):
         p = np.array([nodeload[h][d] for h in nodelist])
         v = np.matmul(np.linalg.inv(G),p)
         f = np.matmul(np.linalg.inv(A[node_ind,:]),p)
         subnodes = [node for node in list(self.dist_net.nodes()) \
                     if nodelabel[node] == 'S']
         for s in subnodes: self.voltage[s].append(1.0)
         for i,n in enumerate(nodelist):
             self.voltage[n].append(1.0-v[i])
         for i,e in enumerate(edgelist):
             self.flows[e].append(abs(f[i]))
     return
Example #31
    def __init__(self, *args, **kwargs):
        super(TestUniformityUniformSpanningTreeSampler, self).__init__(*args, **kwargs)

        n, p = 5, 0.4
        nb_st_min, nb_st_max = 5, 10

        it_max = 100
        for _ in range(it_max):

            g = erdos_renyi_graph(n, p)

            if is_connected(g):
                A = incidence_matrix(g, oriented=True)[:-1, :].toarray()

                potential_st = combinations(range(g.number_of_edges()), n - 1)
                list_st = [st for st in potential_st if det(A[:, st])]

                if nb_st_min <= len(list_st) <= nb_st_max:
                    break
        else:
            raise ValueError('No satisfactory Erdos-Renyi graph found')

        self.g = g
        self.list_of_neighbors = [list(self.g.neighbors(v))
                                  for v in self.g.nodes()]

        self.nb_spanning_trees = len(list_st)

        g_edges_str = [str(set(edge)) for edge in self.g.edges()]
        self.dict_edge_label = dict(zip(g_edges_str,
                                    range(self.g.number_of_edges())))

        self.nb_samples = 1000
        self.list_of_samples = []
Example #32
def incidence_matrices(G, V, E, faces, edge_to_idx):
    """
    Returns incidence matrices B1 and B2

    :param G: NetworkX DiGraph
    :param V: list of nodes
    :param E: list of edges
    :param faces: list of faces in G

    Returns B1 (|V| x |E|) and B2 (|E| x |faces|)
    B1[i][j]: -1 if node i is the tail of edge j, 1 if node i is the head of edge j, else 0 (tail -> head) (smaller -> larger)
    B2[i][j]: 1 if edge i appears sorted in face j, -1 if edge i appears reversed in face j, else 0; given faces with sorted node order
    """
    B1 = np.array(
        nx.incidence_matrix(G, nodelist=V, edgelist=E,
                            oriented=True).todense())
    B2 = np.zeros([len(E), len(faces)])

    for f_idx, face in enumerate(faces):  # face is sorted
        edges = [face[:-1], face[1:], [face[0], face[2]]]
        e_idxs = [edge_to_idx[tuple(e)] for e in edges]

        B2[e_idxs[:-1], f_idx] = 1
        B2[e_idxs[-1], f_idx] = -1
    return B1, B2
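A hypothetical example on a single triangular face, with V, E, and edge_to_idx constructed the way the function expects (sorted nodes, sorted edge tuples):

import networkx as nx

G = nx.DiGraph([(0, 1), (1, 2), (0, 2)])
V = sorted(G.nodes())
E = sorted(G.edges())
edge_to_idx = {e: i for i, e in enumerate(E)}
faces = [(0, 1, 2)]

B1, B2 = incidence_matrices(G, V, E, faces, edge_to_idx)
# B1 is 3x3 (|V| x |E|); B2 is 3x1: +1 for edges (0,1) and (1,2), -1 for (0,2).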
Example #33
    def __compute_kernel_eig_vecs(self):
        """Orthogonalize the rows of vertex-edge incidence matrix (:math:`A`) to get the eigenvectors :math:`U` of the kernel :math:`\mathbf{K}`.
        """

        vert_edg_inc = nx.incidence_matrix(self.graph, oriented=True)
        A = vert_edg_inc[:-1, :].toarray()  # Discard any row e.g. the last one
        # Orthonormalize rows of A
        self.kernel_eig_vecs, _ = la.qr(A.T, mode='economic')
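A standalone sketch of the same orthonormalization on an assumed toy graph, outside the class:

import numpy as np
import networkx as nx
import scipy.linalg as la

g = nx.petersen_graph()
A = nx.incidence_matrix(g, oriented=True)[:-1, :].toarray()
U, _ = la.qr(A.T, mode='economic')   # rows of A become orthonormal columns of U
assert np.allclose(U.T @ U, np.eye(U.shape[1]))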
 def __init__(self,graph,grbpath=None,flowcap=1000,feeder_buffer=1):
     """
     graph: the base graph which has the list of possible edges.
     grbpath: directory path for gurobi log files.
     flowcap: big-M bound on the power flow along an edge.
     feeder_buffer: extra feeders allowed beyond the computed minimum.
     """
     # Get tmp path for gurobi log files
     self.tmp = grbpath+"gurobi/"
     
     # Get data from graph
     self.edges = list(graph.edges())
     self.nodes = list(graph.nodes())
     LABEL = nx.get_node_attributes(graph,name='label')
     LOAD = nx.get_node_attributes(graph,name='load')
     LENGTH = nx.get_edge_attributes(graph,name='length')
     DIST = nx.get_node_attributes(graph,name='distance')
     self.tindex = [i for i,n in enumerate(self.nodes) if LABEL[n]=='T']
     self.rindex = [i for i,n in enumerate(self.nodes) if LABEL[n]=='R']
     self.tnodes = [n for n in self.nodes if LABEL[n]=='T']
     
     # Vectorize the data for matrix computation
     self.d = [1e-3*DIST[self.nodes[i]] for i in self.rindex]
     self.A = nx.incidence_matrix(graph,nodelist=self.nodes,
                                  edgelist=self.edges,oriented=True)
     self.I = nx.incidence_matrix(graph,nodelist=self.nodes,
                                  edgelist=self.edges,oriented=False)
     self.c = [1e-3*LENGTH[self.edges[i]] for i in range(len(self.edges))]
     self.p = np.array([1e-3*LOAD[self.nodes[i]] for i in self.tindex])
     
     # Get feeder rating and number
     total_cap = sum(LOAD.values())*1e-3 # total kVA load to be served
     feeder_cap = int(total_cap/1000)+feeder_buffer # Maximum number of feeders
     
     # Create the optimization model
     self.model = grb.Model(name="Get Primary Network")
     self.model.ModelSense = grb.GRB.MINIMIZE
     self.variables()
     self.masterTree()
     self.powerflow()
     self.radiality()
     self.flowconstraint(M=flowcap)
     self.connectivity()
     self.limit_feeder(M=feeder_cap)
     self.objective()
     self.model.write(self.tmp+"primary.lp")
     self.solve()
     return
Example #35
def show_incidence_matrix( g, ax ):
    global args_
    im = nx.incidence_matrix( g )
    img = im.todense()
    ax.grid( False )
    ax.imshow( img, aspect = 'auto', interpolation = 'none' )
    np.savetxt( '%s_incidence_matrix.csv' % args_.swc, img, fmt='%d' )
    print( 'Saved matrix to csv file' )
Example #36
def create_tap_variables(scenario):
    if scenario not in ['manhattan', '3x3']:
        raise Exception("Not a a known scenario")
    else:
        if scenario == "manhattan":
            G, pos, demands = amod.test_cases.create_manhattan_scenario(reduced = True)
            #create vector with capacities
            caps = matrix(
                [G[edge[0]][edge[1]]['cap_throughput'] for edge in G.edges()]
              )
        elif scenario == "3x3":
            G, pos, demands = amod.test_cases.create_3x3_lattice()
            caps = matrix(
                [G[edge[0]][edge[1]]['capacity'] for edge in G.edges()]
            )

        #info at http://cvxopt.org/userguide/solvers.html#problems-with-nonlinear-objectives
        #number of classes, nodes, edges
        K = len(demands)
        S = len(G.nodes())
        I = len(G.edges())

        #create vector with all freeflow costs
        t0 = matrix(
                [G[e[0]][e[1]]['time'] for e in G.edges()]
        )

        #create vector with demand requirements (b in cvxopt terms)
        # the IK part is for nonnegativty in the lp
        b = np.zeros(S * K, dtype=np.float64)
        for k, dem in enumerate(demands):
            b[dem[0] + S*k] = dem[2] #source
            b[dem[1] + S*k] = -dem[2] #sink
        b = matrix(b)

        #create matrix A for equality constraints
        #it consists of a massive block diagonal matrix
        i_matrix = -nx.incidence_matrix(G, oriented=True).toarray()
        # sadly, spdiag doesn't work
        print "loading A"
        A = method4(i_matrix, S, I, K)

        scenario_params = {
            'network': G,
            'pos': pos,
            'demands': demands,
            'A': A,
            'b': b,
            't0': t0,
            'caps': caps,
            'I': I,
            'K': K
        }
        return scenario_params
Example #37
    def test_incidence_matrix(self):
        "Conversion to incidence matrix"
        I = nx.incidence_matrix(self.G,
                                nodelist=sorted(self.G),
                                edgelist=sorted(self.G.edges()),
                                oriented=True).todense().astype(int)
        assert_equal(I, self.OI)
        I = nx.incidence_matrix(self.G,
                                nodelist=sorted(self.G),
                                edgelist=sorted(self.G.edges()),
                                oriented=False).todense().astype(int)
        assert_equal(I, numpy.abs(self.OI))

        I = nx.incidence_matrix(self.MG,
                                nodelist=sorted(self.MG),
                                edgelist=sorted(self.MG.edges()),
                                oriented=True).todense().astype(int)
        assert_equal(I, self.OI)
        I = nx.incidence_matrix(self.MG,
                                nodelist=sorted(self.MG),
                                edgelist=sorted(self.MG.edges()),
                                oriented=False).todense().astype(int)
        assert_equal(I, numpy.abs(self.OI))

        I = nx.incidence_matrix(self.MG2,
                                nodelist=sorted(self.MG2),
                                edgelist=sorted(self.MG2.edges()),
                                oriented=True).todense().astype(int)
        assert_equal(I, self.MGOI)
        I = nx.incidence_matrix(self.MG2,
                                nodelist=sorted(self.MG2),
                                edgelist=sorted(self.MG2.edges()),
                                oriented=False).todense().astype(int)
        assert_equal(I, numpy.abs(self.MGOI))
def simulate_edge_flows(G, weighted=False):
    """ Simulate the edge flows to first order in kappa.
    Return a matrix in which each column contains DeltaF
    for the corresponding perturbed edge in G.edges_iter().
    Actual perturbation current is proportional to F^0_e,
    but that information is not in this matrix.
    The result is the symmetric matrix B. If the network is
    weighted it is necessary to divide either rows or columns
    by K_e^2 in order to get the factor proportional to
    \kappa F_e^0 to order O(kappa).
    """
    # conductivity weights
    if weighted:
        for u, v, d in G.edges_iter(data=True):
            d['cond'] = d['conductivity']**4/d['weight']
    else:
        for u, v, d in G.edges_iter(data=True):
            d['cond'] = 1.
    
    L = sparse_laplacian(G, weight='cond').tolil()
    
    # newfangled method
    #t0 = time()
    Li = np.linalg.pinv(L.todense())
    d1 = nx.incidence_matrix(G, oriented=True).toarray()
    A = np.array(np.dot(np.dot(d1.T, Li), d1))
            
    if weighted:
        weights = np.array([d['cond'] 
            for u, v, d in G.edges_iter(data=True)])
        A = weights*(A*weights.T).T

    #Re = A[np.diag_indices(A.shape[0])]/weights
    #Re2 = np.array([Li[u,u] - 2*Li[u,v] + Li[v,v]
    #    for u, v in G.edges_iter()])
    #print np.allclose(Re, Re2)
    
    # first order in kappa
    diag_inds = np.diag_indices(A.shape[0])

    if weighted:
        A[diag_inds] = weights - A[diag_inds]
    else:
        A[diag_inds] = 1 - A[diag_inds]

    return A
    def __init__(self, graphtype, graphvars) :
        """ Initialize graph.

        graphtype : Based on networkx graph type.
            Ex : 'barabasi_albert'
        graphvars : Dictionary of all parameters for the graph as
        given in the documentation.
            Ex : For 'barabasi_albert' : dict(n = 10, m = 2, seed = 1)
        """

        try :
            gname = eval('nx.' + graphtype)
            allargs = getargspec(gname)
            impargs = allargs.args
            varlist = '('

            for i in impargs :
                if i in graphvars :
                    varlist += i + '=' + str(graphvars.get(i)) + ','

            varlist = varlist[:-1]
            varlist += ')'

            while True :
                self.Gp = eval('nx.' + graphtype + varlist)
                if nx.is_connected(self.Gp) :
                    break

        except AttributeError :
            print "Graph type does not exist. Check name."
            raise
        except TypeError :
            print "Arguments in graphvars do not meet requirements."
            raise

        self.N = self.Gp.number_of_nodes()
        self.graphvars = graphvars
        self.graphtype = graphtype
        self.rawBmat = nx.incidence_matrix(self.Gp, oriented = True) # Random orientation does not affect the solution
def a_matrix(g):

    # couple_matrix
    # Construct coupled graph matrix from graph structure
    #
    #
    # Inputs:   g           -   current graph structure
    #
    # Outputs:  a_coup      -   coupled incidence matrix
    #           a_vertices  -   order of vertices in coupled matrix
    #

    # order of vertices in incidence matrix
    nodelist = g.nodes()
    a_vertices = [n for n in nodelist if 'M' not in n and 'S' not in n]

    # Incidence matrix
    a_sparse = nx.incidence_matrix(g, nodelist=nodelist, oriented=True)
    a_dense = a_sparse.todense()

    # Build new coupled edges
    edges_to_add = build_coupled_edges(g, nodelist)

    # Add edges to matrix
    try:
        a_extra = np.hstack((a_dense, edges_to_add))
    except ValueError:
        print('No edges to couple in this frame')
        a_extra = a_dense

    # Remove split/merge vertices and previously associated edges
    a_reduce = reduce_to_coupled(a_extra, nodelist)

    # Adjust for higher capacity edges
    a_cap = adjust_capacity_edges(a_reduce, nodelist)

    a_coup = a_cap.copy()
    return a_coup, a_vertices
Example #41
def find_arc_flow(net, demand):
    '''Determine flow on arcs.

    Values are unique for tree networks.

    net: a (tree) network
    demand: maps node IDs to demand (or supply if negative)
    '''

    assert sum(demand.values()) == 0.0, 'flow not balanced'

    # TODO: is this still necessary?
    nods = sorted(net.dg.nodes())
    edgs = sorted(net.dg.edges())
    
    im = nx.incidence_matrix(net.dg, nodelist=nods, edgelist=edgs, oriented=True)
    im = im[1:, :] # skip any (redundant) row
    
    dem = np.array([demand.get(n, 0.0) for n in nods])
    dem = dem[1:] # skip same (redundant) row

    sol = sp.sparse.linalg.spsolve(im, dem)
    return dict(zip(edgs, sol))
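A hypothetical call; the net wrapper exposing a DiGraph as net.dg is an assumption matching the attribute access above:

import networkx as nx

class Net:                                   # minimal stand-in for the expected wrapper
    def __init__(self, dg):
        self.dg = dg

tree = nx.DiGraph([(0, 1), (1, 2), (1, 3)])
demand = {0: -3.0, 1: 0.0, 2: 1.0, 3: 2.0}   # the root supplies what the leaves demand
flows = find_arc_flow(Net(tree), demand)     # {(0, 1): 3.0, (1, 2): 1.0, (1, 3): 2.0}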
    try :
        data.optimalL()
        print "Optimal Lvec found."
        print "Solving the optimized problem."
    except sla.ArpackNoConvergence :
        print "Arpack convergence problem."
        pass

    # Reset the graph with the optimal values. This is very important.
    # The graph structure should remain the same before optimization.
    newdata = Mygraph(gtype, param)
    newdata.Gp = data.Gp
    newdata.N = data.N
    newdata.graphtype = data.graphtype
    newdata.graphvars = data.graphvars
    newdata.rawBmat = nx.incidence_matrix(data.Gp, oriented = True)

    newdata.init_graph(Cap0, Eps, data.L, G, Input_nodes, forc_amp, forc_coef, Ord_req, tpoints, Omega = omega)
    newdata.times = np.linspace(0.0, 2.0*np.pi / omega, tpoints)

    # Solves for the parameters upto order Ord_req.
    newdata.solve()
    newdata.timesol()
    newdata.fouriersol()
    newdata.iterative_solver()

    # Compute new energy in higher harmonics.
    newconc = np.mean(np.sum(np.abs(newdata.alphamat[:,1:]),1) / np.sum(np.abs(newdata.alphamat),1))
    newdata.summary()
    print "Percentage of energy in higher harmonics after optimization is {0:g}%".format(newconc*100)
	def bound1(self):
		return nx.incidence_matrix(self).astype(int)
Example #44
    volt = [ e[2]["volt"] for e in G.edges(data=True) ]
    curr = [ e[2]["curr"] for e in G.edges(data=True) ]
 
    # Voltage law
    M = nx.cycle_basis_matrix (G).T
    KVL = []
    for row in M:
        ind = row.nonzero()[0]
        token = []
        for j in ind:
            token.append (" + "+volt[j] if row[j]>0 else " - "+volt[j])

        KVL.append("".join(token)+" = 0")

    # Current law
    M = nx.incidence_matrix(G, oriented=True).toarray()[:-1]
    KCL = []
    for row in M:
        ind = row.nonzero()[0]
        token = []
        for j in ind:
            token.append (" + "+curr[j] if row[j]>0 else " - "+curr[j])

        KCL.append("".join(token)+" = 0")
    
    print "Kirchhof laws for three resistors in series:"
    for eq in KVL:
        print eq 
    for eq in KCL:
        print eq
        
Example #45
 def test_incidence_matrix(self):
     "Conversion to incidence matrix"
     assert_equal(nx.incidence_matrix(self.G,oriented=True).todense(),self.OI)
     assert_equal(nx.incidence_matrix(self.G).todense(),numpy.abs(self.OI))
     assert_equal(nx.incidence_matrix(self.MG,oriented=True).todense(),self.OI)
     assert_equal(nx.incidence_matrix(self.MG).todense(),numpy.abs(self.OI))
     assert_equal(nx.incidence_matrix(self.MG2,oriented=True).todense(),self.MGOI)
     assert_equal(nx.incidence_matrix(self.MG2).todense(),numpy.abs(self.MGOI))
     assert_equal(nx.incidence_matrix(self.WG,oriented=True).todense(),self.OI)
     assert_equal(nx.incidence_matrix(self.WG).todense(),numpy.abs(self.OI))
     assert_equal(nx.incidence_matrix(self.WG,oriented=True,
                                      weight='weight').todense(),0.5*self.OI)
     assert_equal(nx.incidence_matrix(self.WG,weight='weight').todense(),
                  numpy.abs(0.5*self.OI))
     assert_equal(nx.incidence_matrix(self.WG,oriented=True,weight='other').todense(),
                  0.3*self.OI)
     WMG=nx.MultiGraph(self.WG)
     WMG.add_edge(0,1,attr_dict={'weight':0.5,'other':0.3})
     assert_equal(nx.incidence_matrix(WMG,weight='weight').todense(),
                  numpy.abs(0.5*self.MGOI))
     assert_equal(nx.incidence_matrix(WMG,weight='weight',oriented=True).todense(),
                  0.5*self.MGOI)
     assert_equal(nx.incidence_matrix(WMG,weight='other',oriented=True).todense(),
                  0.3*self.MGOI)
Example #46
    def test_weighted_incidence_matrix(self):
        I = nx.incidence_matrix(self.WG,
                                nodelist=sorted(self.WG),
                                edgelist=sorted(self.WG.edges()),
                                oriented=True).todense().astype(int)
        assert_equal(I, self.OI)
        I = nx.incidence_matrix(self.WG,
                                nodelist=sorted(self.WG),
                                edgelist=sorted(self.WG.edges()),
                                oriented=False).todense().astype(int)
        assert_equal(I, numpy.abs(self.OI))

        # assert_equal(nx.incidence_matrix(self.WG,oriented=True,
        #                                  weight='weight').todense(),0.5*self.OI)
        # assert_equal(nx.incidence_matrix(self.WG,weight='weight').todense(),
        #              numpy.abs(0.5*self.OI))
        # assert_equal(nx.incidence_matrix(self.WG,oriented=True,weight='other').todense(),
        #              0.3*self.OI)

        I = nx.incidence_matrix(self.WG,
                                nodelist=sorted(self.WG),
                                edgelist=sorted(self.WG.edges()),
                                oriented=True,
                                weight='weight').todense()
        assert_equal(I, 0.5 * self.OI)
        I = nx.incidence_matrix(self.WG,
                                nodelist=sorted(self.WG),
                                edgelist=sorted(self.WG.edges()),
                                oriented=False,
                                weight='weight').todense()
        assert_equal(I, numpy.abs(0.5 * self.OI))
        I = nx.incidence_matrix(self.WG,
                                nodelist=sorted(self.WG),
                                edgelist=sorted(self.WG.edges()),
                                oriented=True,
                                weight='other').todense()
        assert_equal(I, 0.3 * self.OI)

        # WMG=nx.MultiGraph(self.WG)
        # WMG.add_edge(0,1,weight=0.5,other=0.3)
        # assert_equal(nx.incidence_matrix(WMG,weight='weight').todense(),
        #              numpy.abs(0.5*self.MGOI))
        # assert_equal(nx.incidence_matrix(WMG,weight='weight',oriented=True).todense(),
        #              0.5*self.MGOI)
        # assert_equal(nx.incidence_matrix(WMG,weight='other',oriented=True).todense(),
        #              0.3*self.MGOI)

        WMG = nx.MultiGraph(self.WG)
        WMG.add_edge(0, 1, weight=0.5, other=0.3)
        I = nx.incidence_matrix(WMG,
                                nodelist=sorted(WMG),
                                edgelist=sorted(WMG.edges(keys=True)),
                                oriented=True,
                                weight='weight').todense()
        assert_equal(I, 0.5 * self.MGOI)
        I = nx.incidence_matrix(WMG,
                                nodelist=sorted(WMG),
                                edgelist=sorted(WMG.edges(keys=True)),
                                oriented=False,
                                weight='weight').todense()
        assert_equal(I, numpy.abs(0.5 * self.MGOI))
        I = nx.incidence_matrix(WMG,
                                nodelist=sorted(WMG),
                                edgelist=sorted(WMG.edges(keys=True)),
                                oriented=True,
                                weight='other').todense()
        assert_equal(I, 0.3 * self.MGOI)
Example #47
def create_rap_variables(scenario):
    if scenario not in ['manhattan', '3x3']:
        raise Exception("Not a a known scenario")
    else:
        if scenario == "manhattan":
            G, pos, demands = amod.test_cases.create_manhattan_scenario(reduced = True)
            #create vector with capacities
            caps = matrix(
                [G[edge[0]][edge[1]]['cap_throughput'] for edge in G.edges()]
              )
        elif scenario == "3x3":
            G, pos, demands = amod.test_cases.create_3x3_lattice()
            caps = matrix(
                [G[edge[0]][edge[1]]['capacity'] for edge in G.edges()]
            )

        #create rebalancing demands
        ## first create set of stations (note difference from nodes)
        stations = set([i for tup in demands for i in tup[:2]])
        reb_demands = [demand for demand in combinations(stations,2)]

        #info at http://cvxopt.org/userguide/solvers.html#problems-with-nonlinear-objectives
        #number of classes, nodes, edges
        Q = len(demands)
        R = len(reb_demands)
        K =  Q + R
        S = len(G.nodes())
        I = len(G.edges())

        #create vector with all freeflow costs
        t0 = matrix(
                [G[e[0]][e[1]]['time'] for e in G.edges()]
        )

        #create vector with demand requirements (b in cvxopt terms)
        # ok, so we need SQ rows for the regular passenger demands
        # S rows to ensure we are rebalanced at each node
        # (S-2) * R rows to ensure that on non-sink, non-source nodes, we have conservation of traffic
        b = np.zeros(S * Q + S + (S-2)*R, dtype=np.float64)
        for q, dem in enumerate(demands):
            b[dem[0] + S*q] = dem[2] #source
            b[dem[1] + S*q] = -dem[2] #sink
        b = matrix(b)

        #create matrix A for equality constraints
        #it consists of a massive block diagonal matrix
        i_matrix = -nx.incidence_matrix(G, oriented=True).toarray()
        # sadly, spdiag doesn't work
        print "loading A"
        A = method5(i_matrix, S, I, Q, R, reb_demands, stations)

        scenario_params = {
            'network': G,
            'pos': pos,
            'demands': demands,
            'stations':stations,
            'S':S,
            'reb_demands': reb_demands,
            'A': A,
            'b': b,
            't0': t0,
            'caps': caps,
            'I': I,
            'Q': Q,
            'R': R,
            'K': K
        }
        return scenario_params
Example #48
        msg('optimizing only soft edges score…', 'blue')
        vec_edges = m
        km_pred = km.fit_predict(m / np.sqrt((m**2).sum(1))[:, np.newaxis])
        cost, rcost, xw = vec_max_edge_score(m, np.copy(km.cluster_centers_), np.ones(m.shape[0]), 5e1, 20)
        orig_ami_us, orig_ami_km = (AMI(wc, np.argmax(m @ xw.T, 1)), AMI(wc, km_pred))
        us_dst = np.mean(cdist(W, xw).min(0))
        w_km = km.cluster_centers_
        km_dst = np.mean(cdist(W, w_km / np.sqrt((w_km**2).sum(1))[:, np.newaxis]).min(0))
        msg('us: {:.3f} (mean dst: {:.3f})\n{} {:.3f}'.format(orig_ami_us, us_dst,
                                                              'km:'.rjust(14), orig_ami_km))
        res[it, (2, 3, 4, 16)] = orig_ami_us, us_dst, orig_ami_km, km_dst

        msg('optimizing soft edges score AND nodes cost…', 'blue')
        a = np.ones(m.shape[0])
        B = nx.incidence_matrix(H)
        Bd = B.toarray().astype(np.int8)
        D = 1 / np.array(B.sum(1)).ravel()
        eta = 700
        ideg = D[:, np.newaxis]
        grad_vec_node_loss_wrt_w = grad(vec_node_loss_wrt_w)
        xw, xa, cn, ce = vector_mixed(m, np.copy(km.cluster_centers_), np.ones(m.shape[0]), [VVar.W, ],
                                      node_factor=.2, num_loop=1, inner_size=50, ef=.8)
        orig_ami_us, us_dst = (AMI(wc, np.argmax(m @ xw.T, 1)), np.mean(cdist(W, xw).min(0)))
        msg('us: {:.3f} (mean dst: {:.3f})'.format(orig_ami_us, us_dst))
        text = 'mean edge score changed from {:.3f} to {:.3f}, and mean node cost from {:.3f} to {:.3f}'
        msg(text.format(ce[0], ce[-1], cn[0], cn[-1]))
        res[it, (5, 6)] = orig_ami_us, us_dst

        msg('improving kmean through heuristics…', 'blue')
        nnz = int((np.abs(W) > .1).sum() / k)
Example #49
def solve_asymp_reb(road_network, demands, capacity='capacity', cost='cost', capacitated=True, availability=1.0):
    '''
    Solves the capacitated rebalancing problem using the Asymptotic Assumption.
    road_network: networkx.DiGraph
    arguments:
        demands:    list of tuples of the form [(source_node, sink_node, demand)]
                    Demands are assumed to be in trips per hour and their source/dest are the order in
                    which nodes appear in the function road_network.nodes()
        capacity:   label to use for capacity in the network graph.
                    the value should be in terms of throughput (vehicles per hour)
        cost:       label to use for cost of trip in link
                    the value should be in terms of time to travel link in freeflow
        capacitated:bool, whether to take road capacity into effect

    returns:
        flows:      2D numpy array where each column contains the flow at each edge for that specific class
                    The order of the columns is identical to that of the order of demands in the input arg
                    and the order of the rows identical to the order of road_network.edges()
        demands_cr: updated demand list of tuples
        P:          Transition matrix between nodes
    '''

    #number of classes, nodes, edges
    K = len(demands)
    S = len(road_network.nodes())
    I = len(road_network.edges())

    #cost vector
    c = matrix(
        [road_network[e[0]][e[1]][cost] for e in road_network.edges()]*K
    )
    print "cost vector"

    #A is a block diagonal matrix
    #we need to remove the rows that correspond to the sources and sinks for each class
    #and we need to add a tile i_matrix at the end
    i_matrix = -nx.incidence_matrix(road_network, oriented=True).toarray()
    matrices = []
    source_rows= []
    sink_rows = []
    source_demands = []
    sink_demands = []
    print "masks"
    for k, dem in enumerate(demands):
        #print "at dem", k
        mask = np.ones(S, dtype=bool)
        mask_source = np.ones(S, dtype=bool)
        mask_sink = np.ones(S, dtype=bool)
        mask[[dem[0],dem[1]]] = False
        mask_source[[dem[0]]] = False
        mask_sink[[dem[1]]] = False
        sparse_imatrix = sparse(matrix(i_matrix[mask]))
        
        matrices.append(spmatrix(sparse_imatrix.V, 
                                 sparse_imatrix.I, 
                                 sparse_imatrix.J + I*k,
                                 size = (sparse_imatrix.size[0], I*K)
                                ))
        
        #print preceding_zeros.shape, i_matrix[~mask_source].shape, following_zeros.shape
        
        source_imatrix = sparse(matrix(-i_matrix[~mask_source]))
        sink_imatrix = sparse(matrix(i_matrix[~mask_sink]))
        source_rows.append(spmatrix(source_imatrix.V, 
                                 source_imatrix.I, 
                                 source_imatrix.J + I*k,
                                 size = (source_imatrix.size[0], I*K)
                                ))
        sink_rows.append(spmatrix(sink_imatrix.V, 
                                 sink_imatrix.I, 
                                 sink_imatrix.J + I*k,
                                 size = (sink_imatrix.size[0], I*K)
                                ))
        source_demands.append(-dem[2])
        sink_demands.append(-dem[2])
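        # The source/sink rows are encoded as inequalities: since Q x <= C_cr,
        # negating the source row turns (outflow - inflow) >= demand into
        # -(outflow - inflow) <= -demand, and likewise for the sink.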
       
    print "appending more imatrix"
    #compute the last part
    matrices.append(
        sparse(([matrix(i_matrix).trans()] * K)).trans()
    )

    print "A"
    A = sparse(matrices)
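    # Assuming each demand's source and sink are distinct nodes, A has (S-2)
    # conservation rows per class plus S total-flow rows: ((S-2)*K + S) x (I*K).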

    #capacity block of Q is I x (I*K)
    #C is I x 1
    print("C")
    C = np.zeros(I)
    q_entries = [] #list of spmatrix
    print "qs"
    for i,edge in enumerate(road_network.edges()):
        q_vals = 1
        q_cols = [k*I + i for k in range(K)]
        q_rows = [0 for k in range(K)]
        q_entries.append(spmatrix(q_vals, q_rows, q_cols, size=(1, I*K)))
        C[i] += road_network[edge[0]][edge[1]][capacity]


    #add them to Q and C, multiply source by -1
    print "concats"
    temp_c = np.concatenate((C, np.array(source_demands)), axis = 0)
    q_entries += source_rows
    temp_c2 = np.concatenate((temp_c, np.array(sink_demands)), axis = 0)
    del temp_c
    C_cr = matrix(np.concatenate((temp_c2, np.zeros(I*K)), axis = 0))
    q_entries += sink_rows
    q_entries += [spdiag( matrix([-1] * I*K) )]
    # add the x >= 0 rows (written as -x <= 0)
    print("Q")
    Q_cr = sparse(q_entries)
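    # Row blocks of Q_cr, top to bottom: I capacity rows, K source-demand rows,
    # K sink-demand rows, and a negated I*K identity enforcing x >= 0.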

    #right-hand side of the equality constraints: all conservation rows equal zero
    print("h")
    h = matrix(np.zeros((A.size[0], 1)))
    print("solver")
    res = solvers.lp(c, Q_cr, C_cr, A = A, b = h, solver='glpk')
    print("flows")
    flows = np.reshape(np.array(res['x']),(K,I)).transpose()

    #get the new demands based on the flows
    print "demands"
    demand_results = np.dot(i_matrix, flows)
    D = np.zeros((S, S), dtype = np.float64)
    demands_cr = []
    for l,demand in enumerate(demands):
        D[demand[0]][demand[1]] += demand_results[demand[0]][l]
        demands_cr += [(demand[0],demand[1],demand_results[demand[0]][l])]

    return flows, demands_cr, D
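
A minimal smoke test for the function above (hypothetical graph and values, not from the source; assumes cvxopt is built with GLPK support):

import networkx as nx

G = nx.DiGraph()
G.add_edge(0, 1, capacity=100.0, cost=1.0)
G.add_edge(1, 2, capacity=100.0, cost=1.0)
G.add_edge(2, 0, capacity=100.0, cost=1.0)
# Two opposing demands so that the total-flow balance rows can be satisfied.
demands = [(0, 2, 10.0), (2, 0, 10.0)]
flows, demands_cr, D = solve_asymp_reb(G, demands)
print(flows.shape)  # (3 edges, 2 classes)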
Example #50
0
import gc

import networkx as nx
import numpy as np
from cvxopt import matrix, sparse, spmatrix, solvers
from scipy.special import gammaincinv

def solve_asymp_reb(road_network, demands, capacity='capacity', cost='time',
                    capacitated=True, availability=1.0, max_exceedence=None):
    '''
    Solves the capacitated rebalancing problem using the Asymptotic Assumption.
    arguments:
        road_network: networkx.DiGraph
        demands:    list of tuples of the form [(source_node, sink_node, demand)].
                    Demands are assumed to be in trips per hour; source/sink are
                    indices into the order in which nodes appear in road_network.nodes()
        capacity:   label to use for capacity in the network graph.
                    The value should be in terms of throughput (vehicles per hour)
        cost:       label to use for the cost of a trip on a link.
                    The value should be the freeflow travel time of the link
        capacitated: bool, whether to take road capacity into account (currently unused)
        availability: currently unused
        max_exceedence: optional; if set and below 0.4, each link's capacity is
                    tightened to gammaincinv(capacity + 1, max_exceedence) / cost
                    (see the comment in the edge loop below)

    returns:
        flows:      2D numpy array where each column contains the flow at each edge
                    for that specific class. The order of the columns matches the
                    order of demands in the input argument, and the order of the
                    rows matches road_network.edges()
        demands_cr: updated demand list of tuples
        D:          S x S matrix of the resulting demand between each source/sink pair
    '''
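    # Same formulation as the previous example, but sparse blocks are
    # accumulated as COO-style (value, row, col) triplets and assembled with a
    # single spmatrix call, which avoids holding many intermediate cvxopt
    # matrices in memory at once.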

    #number of classes, nodes, edges
    K = len(demands)
    S = len(road_network.nodes())
    I = len(road_network.edges())

    #cost vector, tiled once per class: (I*K) x 1
    c = matrix(
        [road_network[e[0]][e[1]][cost] for e in road_network.edges()]*K
    )
    print("cost vector")

    #A is a block-diagonal matrix:
    #for each class we drop the conservation rows of its source and sink,
    #and we append a horizontally tiled copy of i_matrix at the end
    i_matrix = -nx.incidence_matrix(road_network, oriented=True).toarray()
    matrices = {'x': [], 'I': [], 'J': []}
    sources = {'x': [], 'I': [], 'J': []}
    sinks = {'x': [], 'I': [], 'J': []}
    source_demands = []
    sink_demands = []
    print "masks"
    mask = np.ones(S, dtype=bool)
    mask_source = np.ones(S, dtype=bool)
    mask_sink = np.ones(S, dtype=bool)
    

    for k, dem in enumerate(demands):
        if k == 0 or k % 100 == 0:
            # print progress every 100 iterations and collect garbage
            print('dem', k, '/', K)
            gc.collect()
        mask[:] = True
        mask_source[:] = True
        mask_sink[:] = True
        mask[[dem[0],dem[1]]] = False
        mask_source[[dem[0]]] = False
        mask_sink[[dem[1]]] = False
        
        sparse_imatrix = sparse(matrix(i_matrix[mask]))
        
        if matrices['I']:
            base_row = max(matrices['I']) + 1
        else:
            base_row = 0
        matrices['x'] += list(sparse_imatrix.V)
        matrices['I'] += list(sparse_imatrix.I + base_row)
        matrices['J'] += list(sparse_imatrix.J + I*k)
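        # base_row stacks this class's conservation rows below the previous
        # classes'; the column offset I*k selects this class's flow variables.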
        
        source_imatrix = sparse(matrix(-i_matrix[~mask_source]))
        
        sink_imatrix = sparse(matrix(i_matrix[~mask_sink]))
        
        sources['x'] += list(source_imatrix.V)
        sources['I'] += list(source_imatrix.I + k)
        sources['J'] += list(source_imatrix.J + I*k)
        
        sinks['x'] += list(sink_imatrix.V)
        sinks['I'] += list(sink_imatrix.I + k)
        sinks['J'] += list(sink_imatrix.J + I*k)
        
        source_demands.append(-dem[2])
        sink_demands.append(-dem[2])
        
        
    
    print "First loop done"

    #the last block: i_matrix tiled horizontally across all K classes
    temp_sparse = sparse(([matrix(i_matrix).trans()] * K)).trans()
    base_row = max(matrices['I']) + 1
    matrices['x'] += list(temp_sparse.V)
    matrices['I'] += list(temp_sparse.I + base_row)
    matrices['J'] += list(temp_sparse.J)

    
    print "A"
    A = spmatrix(matrices['x'], matrices['I'], matrices['J'])
    del temp_sparse
    del matrices

    
    #capacity block of Q is I x (I*K)
    #C is I x 1
    print("C")
    C = np.zeros(I)
    q_entries = {'x': [], 'I': [], 'J': []}  # COO triplets for Q

    print "qs"
    for i,edge in enumerate(road_network.edges()):
        if q_entries['I']:
            base_row = max(q_entries['I']) + 1
        else:
            base_row = 0
        q_vals = [1 for k in range(K)]
        q_cols = [k*I + i for k in range(K)]
        q_rows = [base_row  for k in range(K)]
        q_entries['x'] += q_vals
        q_entries['I'] += q_rows
        q_entries['J'] += q_cols
        if max_exceedence and max_exceedence < 0.4:
            cap = gammaincinv(road_network[edge[0]][edge[1]][capacity] + 1, max_exceedence)
            C[i] +=  cap / road_network[edge[0]][edge[1]][cost]
        else:
            C[i] += road_network[edge[0]][edge[1]][capacity] / road_network[edge[0]][edge[1]][cost]
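        # Presumably link loads are modelled as Poisson: for X ~ Poisson(lam),
        # Pr[X > n] = P(n + 1, lam) (regularized lower incomplete gamma), so
        # gammaincinv(n + 1, p) is the largest arrival rate at which the chance
        # of exceeding the nominal capacity n stays at p.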
    print('max_row_Q', max(q_entries['I']), I - 1)


    #add them to Q and C, multiply source by -1
    print "concats"
    temp_c = np.concatenate((C, np.array(source_demands)), axis = 0)
    base_row = max(q_entries['I']) + 1
    q_entries['x'] += sources['x']
    q_entries['I'] += list(np.array(sources['I']) + base_row)
    q_entries['J'] += sources['J']
    print('max_row_Q', max(q_entries['I']), I + K - 1)
    temp_c2 = np.concatenate((temp_c, np.array(sink_demands)), axis = 0)
    del temp_c
    C_cr = matrix(np.concatenate((temp_c2, np.zeros(I*K)), axis = 0))
    base_row = max(q_entries['I']) + 1
    q_entries['x'] += sinks['x']
    q_entries['I'] += list(np.array(sinks['I']) + base_row)
    q_entries['J'] += sinks['J']
    print('max_row_Q', max(q_entries['I']), I + 2*K - 1)
    # add the x >= 0 rows (written as -x <= 0)
    base_row = max(q_entries['I']) + 1
    q_entries['x'] += [-1] * I*K
    q_entries['I'] += [i + base_row for i in range(I*K)]
    q_entries['J'] += list(range(I*K))
    print('max_row_Q', max(q_entries['I']), I + 2*K + I*K - 1)
    print("Q")
    Q = spmatrix(q_entries['x'], q_entries['I'], q_entries['J'])


    #right-hand side of the equality constraints: all conservation rows equal zero
    print("h")
    h = matrix(np.zeros((A.size[0], 1)))
    print(c.size, Q.size, C_cr.size, A.size, h.size, h.typecode)

    print("solver")
    res = solvers.lp(c, Q, C_cr, A = A, b = h, solver='mosek')
    print("flows")
    flows = np.reshape(np.array(res['x']),(K,I)).transpose()
    
    #get the new demands based on the flows
    print "demands"
    demand_results = np.dot(i_matrix, flows)
    D = np.zeros((S, S), dtype = np.float64)
    demands_cr = []
    for l,demand in enumerate(demands):
        D[demand[0]][demand[1]] += demand_results[demand[0]][l]
        demands_cr += [(demand[0],demand[1],demand_results[demand[0]][l])]
    
    return flows, demands_cr, D
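
As above, a hypothetical smoke test (illustrative names and values; note this variant reads the 'time' edge attribute by default and hard-codes solver='mosek', so a MOSEK-enabled cvxopt is assumed):

import networkx as nx

G = nx.DiGraph()
G.add_edge(0, 1, capacity=100.0, time=1.0)
G.add_edge(1, 2, capacity=100.0, time=1.0)
G.add_edge(2, 0, capacity=100.0, time=1.0)
demands = [(0, 2, 10.0), (2, 0, 10.0)]
# Tighten each link so the chance of exceeding its nominal capacity is <= 5%.
flows, demands_cr, D = solve_asymp_reb(G, demands, max_exceedence=0.05)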