Example #1
 def test_degree_digraph(self):
     H = nx.DiGraph()
     H.add_edges_from([(1, 24), (1, 2)])
     assert sorted(d for n, d in H.in_degree([1, 24])) == [0, 1]
     assert sorted(d for n, d in H.out_degree([1, 24])) == [0, 2]
     assert sorted(d for n, d in H.degree([1, 24])) == [1, 2]
Example #2
def generate_simulated_ivlt_experiment(mutation_prob_map, variable_dropout_prob_map, characters=10, gen_per_dish=7, num_splits=2, subsample_percentage=0.1):
	"""
	Given the following parameters, this method simulates cell division and mutation. As with `generate_simulated_full_tree`:
		- Cells/Samples are treated as a string, with a unique identifier appended to the end of the string,
		  in the form sample = 0|3|0|12, where len(sample.split('|')) = characters
		- Each generation, all cells are duplicated, and each character is independently transformed
		  with the probabilities of transformation defined in mutation_prob_map
		- At the end of this process of duplication, there will be 2 ^ depth samples.
		- We sample subsample_percentage of the 2 ^ depth samples
		- On the subsampled population, we simulate dropout on each individual character in each sample
		  with probability variable_dropout_prob_map
		- Every gen_per_dish generations, we also simulate a plate split and maintain
		  these plate labels for each cell going forward.

	:param mutation_prob_map:
		A nested dictionary containing mutation probabilities for [character][state] mappings
		where characters are in the form of integers, and states are in the form of strings,
		and values are the probability of mutation from the '0' state,
		e.g. {0: {"0": 0.975, "1": 0.25}, ...}
	:param variable_dropout_prob_map:
		A dictionary containing dropout probabilities for each individual character
		e.g. {0: 0.05, 1: 0.01, 2: 0.2, ...}
	:param characters:
		The number of characters to simulate
	:param gen_per_dish:
		The number of generations between splits.
	:param num_splits:
		The number of splits to simulate. Total depth is (num_splits + 1) * gen_per_dish.
	:param subsample_percentage:
		Percentage of population to subsample after the final generation
	:return:
		A networkx tree of samples
	"""

	network = nx.DiGraph()
	current_depth = [[['0' for _ in range(0, characters)], "0"]]
	network.add_node(node_to_string(current_depth[0]))
	network.nodes[node_to_string(current_depth[0])]["plate"] = ""
	uniq = 1


	# total depth: one dish per split, plus the final dish after the last split
	total_depth = (num_splits + 1) * gen_per_dish
	for i in tqdm(range(0, total_depth), desc="Generating cells at each level in tree"):
		temp_current_depth = []
		for node in current_depth:
			for _ in range(0, 2):
				child_node = simulate_mutation(node[0], mutation_prob_map)
				if i == total_depth - 1:
					child_node = simulate_dropout(child_node, variable_dropout_prob_map)

				temp_current_depth.append([child_node, str(uniq)])
				network.add_edge(node_to_string(node), node_to_string([child_node, str(uniq)]))
				if i != 0 and i % gen_per_dish == 0:
					split_right = (np.random.random() > 0.5) # assign each daughter to one of two plates at random
					if split_right:
						network.nodes[node_to_string([child_node, str(uniq)])]["plate"] = network.nodes[node_to_string(node)]["plate"] + "0"
					else:
						network.nodes[node_to_string([child_node, str(uniq)])]["plate"] = network.nodes[node_to_string(node)]["plate"] + "1"

				else:
					network.nodes[node_to_string([child_node, str(uniq)])]["plate"] = network.nodes[node_to_string(node)]["plate"]

				uniq += 1

		current_depth = temp_current_depth

	subsampled_population_for_removal = random.sample(current_depth, int((1-subsample_percentage) * len(current_depth)))

	for node in subsampled_population_for_removal:
		network.remove_node(node_to_string(node))

	return network
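A hedged usage sketch (not from the original source): building the two probability maps in the shapes the docstring above describes, then calling the function. The numbers and the 10-character setup are illustrative assumptions.

# Hypothetical usage sketch for generate_simulated_ivlt_experiment.
# Map shapes follow the docstring; all values here are made up.
mutation_prob_map = {c: {"0": 0.975, "1": 0.025} for c in range(10)}
variable_dropout_prob_map = {c: 0.05 for c in range(10)}
tree = generate_simulated_ivlt_experiment(mutation_prob_map,
                                          variable_dropout_prob_map,
                                          characters=10,
                                          gen_per_dish=7,
                                          num_splits=2,
                                          subsample_percentage=0.1)
print(tree.number_of_nodes())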
Example #3
def foo(tree):
    G = nx.DiGraph()
    for u,v in postorder_edge(tree):
        G.add_edge(id(u),id(v))
    return G
Example #4
def show_graph():
    g = generate_graph()
    ng = nx.DiGraph(g)
    nx.draw(ng)
    plt.show()
Example #5
def drawNetwork(Gx=None,
                Gxu=None,
                nodes=None,
                edges=None,
                forceiter=100,
                grphtype='undirected',
                dx=10,
                dy=10,
                colormap='jet',
                scale=1.0,
                layout='force',
                drawlabels=True,
                giant=False,
                equi=False,
                res=0.5,
                k=None,
                edge_labels=False,
                font=12):

    if grphtype == 'directed':
        if Gx is None and Gxu is None:
            Gx = nx.from_pandas_edgelist(edges,
                                         'Source',
                                         'Target', ['Weight'],
                                         create_using=nx.DiGraph())
            Gxu = nx.from_pandas_edgelist(edges, 'Source', 'Target',
                                          ['Weight'])
        if giant:
            print('not implemented')
    else:
        if Gx is None:
            Gx = nx.from_pandas_edgelist(edges, 'Source', 'Target', ['Weight'])
        if giant and not nx.is_connected(Gx):
            S = [Gx.subgraph(c).copy() for c in nx.connected_components(Gx)]
            size = []
            for s in S:
                size.append(len(s))
            idsz = np.argsort(size)
            print('found ', np.array(size)[idsz], ' connected components')
            index = int(input('enter index '))
            Gx = S[idsz[index]]
    if layout == 'force' or layout is None:
        pos = nx.spring_layout(Gx, k=1, iterations=forceiter)
    elif layout == 'spiral':
        pos = nx.spiral_layout(Gx, equidistant=equi, resolution=res)
    df = np.array(nodes)
    if len(df.shape) == 1:
        df = np.reshape(df, (len(df), 1))
    nodelabel = dict(
        zip(np.linspace(0, len(df[:, 0]) - 1, len(df[:, 0]), dtype=int),
            df[:, 0]))
    labels = {}
    for idx, node in enumerate(Gx.nodes()):
        labels[node] = nodelabel[int(node)]
    if grphtype == 'directed':
        part = cm.best_partition(Gxu)
        values = [part.get(node) for node in Gxu.nodes()]
    else:
        part = cm.best_partition(Gx)
        values = [part.get(node) for node in Gx.nodes()]
    d = nx.degree(Gx)
    dsize = [(d[v] + 1) * 100 * scale for v in Gx.nodes()]
    plt.figure(figsize=(dx, dy))
    if edge_labels:
        edge_labels = nx.get_edge_attributes(Gx, 'Label')
        nx.draw_networkx_edge_labels(Gx, pos, edge_labels, font_size=font)
    nx.draw_networkx(Gx,
                     pos=pos,
                     labels=labels,
                     with_labels=drawlabels,
                     cmap=plt.get_cmap(colormap),
                     node_color=values,
                     node_size=dsize)
    plt.show()
Example #6
import v_jhmmtg_5
import junction_init as ji
import big_junction_init as bji
import math as m
import bf_test as bf
import jhmmtg as jh
import big_jhmmtg as bjh
import tgeaa as tg
import HRLB as hr
# import big_HRLB as bhr
import HMMM as hm
import time as tim
import random
import mcds
import networkx as nx

v_graph = nx.DiGraph()


def calibration(node_num, node_info_dict, time_interval):
    v_graph.clear()
    for i in range(0, node_num):
        for j in range(0, i):
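            # squared straight-line distance between nodes i and j after
            # extrapolating each position by velocity * time_interval
            # (node_info_dict[k][0] = position, node_info_dict[k][1] = velocity)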
            a = pow((node_info_dict[i][0][0] +
                     node_info_dict[i][1][0] * time_interval) -
                    (node_info_dict[j][0][0] +
                     node_info_dict[j][1][0] * time_interval), 2) + pow(
                         (node_info_dict[i][0][1] +
                          node_info_dict[i][1][1] * time_interval) -
                         (node_info_dict[j][0][1] +
                          node_info_dict[j][1][1] * time_interval), 2)
            if a < pow(Gp.com_dis, 2):
Example #7
def get_graph_from_molecule(molecule):
    # Parse molecule file
    na = int(molecule[0][0])
    coordinates = [
        [c[0], float(c[1].replace("*^", "e")), float(c[2].replace("*^", "e")),
            float(c[3].replace("*^", "e")), float(c[4].replace("*^", "e"))]
        for c in molecule[2:(na+2)]
    ]
    properties = dict(zip(
        [
            "id", "rotational_a", "rotational_b", "rotational_c", "dipole_moment", "polarizability",
            "homo_energy", "lumo_energy", "spatial_extent", "internal_energy_0k",
            "internal_energy_298k", "free_energy", "heat_capacity"],
        [float(e.replace("gdb", "").strip()) for e in molecule[1][:-1]]
    ))
    smiles = molecule[na+3][0]
    graph = pysmiles.read_smiles(smiles, explicit_hydrogen=True)
    mol = Chem.MolFromSmiles(smiles)
    # One-hot encode element
    nx.set_node_attributes(graph, {
        k: {
            "entity": "atom",
            "element_c": int(d["element"] == "C"),
            "element_f": int(d["element"] == "F"),
            "element_h": int(d["element"] == "H"),
            "element_n": int(d["element"] == "N"),
            "element_o": int(d["element"] == "O"),
            "acceptor": int(d["charge"] > 0),
            "donor": int(d["charge"] < 0),
        }
        for k, d in dict(graph.nodes(data=True)).items()
    })
    # Add Chem molecule attributes
    hybridizations = ["SP", "SP2", "SP3"]
    nx.set_node_attributes(graph, {
        **{
            atom.GetIdx(): {
                "aromatic": int(atom.GetIsAromatic()),
                "atomic_number": atom.GetAtomicNum(),
                "hybridization_null": int(str(atom.GetHybridization()) not in hybridizations),
                "hybridization_sp": int(str(atom.GetHybridization()) == hybridizations[0]),
                "hybridization_sp2": int(str(atom.GetHybridization()) == hybridizations[1]),
                "hybridization_sp3": int(str(atom.GetHybridization()) == hybridizations[2]),
                "hydrogen_count": atom.GetNumImplicitHs(),
            }
            for atom in mol.GetAtoms()
        }, **{
            k: {
                "aromatic": 0,
                "atomic_number": 1,
                "hybridization_null": 1,
                "hybridization_sp": 0,
                "hybridization_sp2": 0,
                "hybridization_sp3": 0,
                "hydrogen_count": 0
            }
            for k in range(mol.GetNumAtoms(), graph.number_of_nodes())
        }
    })
    # Set edge attributes
    nx.set_edge_attributes(graph, {
        (src, tgt): {
            "distance": np.sqrt(np.sum(np.square(
                np.array(coordinates[tgt][1:]) - np.array(coordinates[src][1:])
            ))),
            "order_1": int(d["order"] == 1),
            "order_1_5": int(d["order"] == 1.5),
            "order_2": int(d["order"] == 2),
            "order_3": int(d["order"] == 3),
        }
        for src, tgt, d in list(graph.edges(data=True))
    })
    # Add graph level targets
    for key in [k for k in properties if k != "id"]:
        graph.graph[key] = properties[key]
    # Turn into directed graph
    digraph = nx.DiGraph(graph)
    return digraph
Example #8
def main():
    WT = 'VDGV'
    fitfile = 'result/Mutfit'
    missfitfile = 'result/regression_missing'
    #missfitfile  = 'result/regression_all_WT'
    outfile = 'analysis/LocalMaxEvolvePotWTnuc'  #+'_pair'
    degfile = 'analysis/LocalMaxEvolveDegreeWTnuc'  #+'_pair'
    fcutoff = -1
    fitfold = float(1)
    condition = 'I20fit'
    Index2pos = {0: 39, 1: 40, 2: 41, 3: 54}
    codondists = codondistancemap()
    AAgraph = buildaagraph(codondists)
    AAtransitionmatrix(AAgraph, 'analysis/AAtransitionmatrix')
    fithash = TsvWithHeader2Hash(fitfile, condition)
    print "Total # of variants in the raw data: %d" % len(fithash.keys())
    fithash = fillinmissing(fithash, missfitfile, condition)
    muts = fithash.keys()
    print "Total # of variants after fill in with regression: %d" % len(muts)
    print "# of mutant pass cutoff: %d" % len(muts)
    G = nx.DiGraph()
    G = buildDigraph(G, muts, fithash, condition, codondists)
    print('Finish building digraph with %d nodes and %d edges' %
          (len(G.nodes()), len(G.edges())))
    print('Analyzing degrees for each node')
    degreeanalysis(degfile, G, fithash, condition)
    outfile = open(outfile, 'w')
    header = "\t".join(['mut', 'HD', 'fit', 'PathLength', 'Path', 'Direct'])
    outfile.write(header + "\n")
    WTfit = float(fithash[WT][condition])
    print('Working %s with fitness %f' % (WT, WTfit))
    AllEnds = [
        j for j in muts if float(fithash[j][condition]) > WTfit * fitfold
    ]
    ReachEnds = single_source_shortest_path(G, WT)
    del ReachEnds[WT]
    for End in list(ReachEnds.keys()):
        if float(fithash[End][condition]) <= WTfit * fitfold:
            del ReachEnds[End]
    print("Total Number of variants with fitness higher than %f = %d (Reachable = %d)" % (
        WTfit, len(AllEnds), len(ReachEnds.keys())))
    for End in AllEnds:
        if End == 'VDGV': continue
        Endfit = fithash[End][condition]
        EndHD = hamming(WT, End)
        AADist = ShortestAAPath(WT, End, AAgraph)
        if End in ReachEnds:
            Endpl = len(ReachEnds[End]) - 1
            Endpath = '->'.join(ReachEnds[End])
        else:
            Endpl = -1
            Endpath = 'NA'
        if Endpl == AADist: Direct = 'Yes'
        elif Endpl == -1: Direct = 'Inaccessible'
        elif AADist < Endpl: Direct = 'No'
        else:
            print "Something is wrong"
            sys.exit()
        outfile.write(
            "\t".join(map(str, [End, EndHD, Endfit, Endpl, Endpath, Direct])) +
            "\n")
    outfile.close()
Example #9
 def __init__(self):
     self.string = r''
     self.models_str = list()
     self.models = list()
     self.graph = nx.DiGraph()
Example #10
    def __init__(self, *args, **kwargs):
        '''
        num_periods = number of periods in simulation.
        Node specific parameters:
            - I0 = initial inventory.
            - C = production capacity.
            - v = production yield in the range (0, 1].
            - o = unit operating cost (feed-based)
            - h = unit holding cost for excess on-hand inventory.
        Edge specific parameters:
            - L = lead times between adjacent nodes.
            - p = unit price to send material between adjacent nodes (purchase price/reorder cost)
            - b = unit backlog cost or goodwill loss for unfulfilled market demand between adjacent retailer and market.
            - g = unit holding cost for pipeline inventory on a specified edge.
            - prob_dist = probability distribution function on a (retailer, market) edge.
            - demand_dist = demand distribution for (retailer, market) edge. Two options:
                - use scipy probability distribution: must be a lambda function calling the rvs method of the distribution
                    i.e. lambda: poisson.rvs(mu=20)
                - use a list of user specified demands for each period. 
        backlog = Are unfulfilled orders backlogged? True = backlogged, False = lost sales.
        demand_dist = distribution function for customer demand (e.g. poisson, binomial, uniform, geometric, etc.)
        dist_param = named values for parameters fed to statistical distribution.
            poisson: {'mu': <mean value>}
            binom: {'n': <number of trials>,
                    'p': <success probability in (0, 1)>}
            randint: {'low': <lower bound>, 'high': <upper bound>}
            geom: {'p': <probability. Outcome is the number of trials to success>}
        alpha = discount factor in the range (0,1] that accounts for the time value of money
        seed_int = integer seed for random state.
        user_D = dictionary containing user specified demand (list) for each (retail, market) pair at
            each time period in the simulation. If all zeros, ignored; otherwise, demands will be taken from this list.
        sample_path = dictionary specifying whether user_D (for each (retail, market) pair) is sampled from demand_dist.
        '''
        # set default (arbitrary) values when creating environment (if no args or kwargs are given)
        self._max_rewards = 2000
        self.num_periods = 30
        self.backlog = True
        self.alpha = 1.00
        self.seed_int = 0
        self.user_D = {(1,0): np.zeros(self.num_periods)}
        self.sample_path = {(1,0): False}

        # create graph
        self.graph = nx.DiGraph()
        # Market 
        self.graph.add_nodes_from([0])
        # Retailer
        self.graph.add_nodes_from([1], I0 = 100,
                                        h = 0.030)
        # Distributors
        self.graph.add_nodes_from([2], I0 = 110,
                                        h = 0.020)
        self.graph.add_nodes_from([3], I0 = 80,
                                        h = 0.015)
        # Manufacturers
        self.graph.add_nodes_from([4], I0 = 400,
                                        C = 90,
                                        o = 0.010,
                                        v = 1.000,
                                        h = 0.012)
        self.graph.add_nodes_from([5], I0 = 350,
                                        C = 90,
                                        o = 0.015,
                                        v = 1.000,
                                        h = 0.013)
        self.graph.add_nodes_from([6], I0 = 380,
                                        C = 80,
                                        o = 0.012,
                                        v = 1.000,
                                        h = 0.011)
        # Raw materials
        self.graph.add_nodes_from([7, 8])
        # Links
        self.graph.add_edges_from([(1,0,{'p': 2.000,
                                         'b': 0.100,
                                         'demand_dist': poisson,
                                         'dist_param': {'mu': 20}}),
                                   (2,1,{'L': 5,
                                         'p': 1.500,
                                         'g': 0.010}),
                                   (3,1,{'L': 3,
                                         'p': 1.600,
                                         'g': 0.015}),
                                   (4,2,{'L': 8,
                                         'p': 1.000,
                                         'g': 0.008}),
                                   (4,3,{'L': 10,
                                         'p': 0.800,
                                         'g': 0.006}),
                                   (5,2,{'L': 9,
                                         'p': 0.700,
                                         'g': 0.005}),
                                   (6,2,{'L': 11,
                                         'p': 0.750,
                                         'g': 0.007}),
                                   (6,3,{'L': 12,
                                         'p': 0.800,
                                         'g': 0.004}),
                                   (7,4,{'L': 0,
                                         'p': 0.150,
                                         'g': 0.000}),
                                   (7,5,{'L': 1,
                                         'p': 0.050,
                                         'g': 0.005}),
                                   (8,5,{'L': 2,
                                         'p': 0.070,
                                         'g': 0.002}),
                                   (8,6,{'L': 0,
                                         'p': 0.200,
                                         'g': 0.000})])
        
        # add environment configuration dictionary and keyword arguments
        assign_env_config(self, kwargs)

        # Save user_D and sample_path to graph metadata
        for link in self.user_D.keys():
            d = self.user_D[link]
            if np.sum(d) != 0:
                self.graph.edges[link]['user_D'] = d
                if link in self.sample_path.keys():
                    self.graph.edges[link]['sample_path'] = self.sample_path[link]
            else:
                # Placeholder to avoid key errors
                self.graph.edges[link]['user_D'] = 0
        
        self.num_nodes = self.graph.number_of_nodes()
        self.adjacency_matrix = np.vstack(self.graph.edges())
        # Set node levels
        self.levels = {}
        self.levels['retailer'] = np.array([1])
        self.levels['distributor'] = np.unique(np.hstack(
            [list(self.graph.predecessors(i)) for i in self.levels['retailer']]))
        self.levels['manufacturer'] = np.unique(np.hstack(
            [list(self.graph.predecessors(i)) for i in self.levels['distributor']]))
        self.levels['raw_materials'] = np.unique(np.hstack(
            [list(self.graph.predecessors(i)) for i in self.levels['manufacturer']]))

        self.level_col = {'retailer': 0,
                    'distributor': 1,
                    'manufacturer': 2,
                    'raw_materials': 3}

        self.market = [j for j in self.graph.nodes() if len(list(self.graph.successors(j))) == 0]
        self.distrib = [j for j in self.graph.nodes() if 'C' not in self.graph.nodes[j] and 'I0' in self.graph.nodes[j]]
        self.retail = [j for j in self.graph.nodes() if len(set.intersection(set(self.graph.successors(j)), set(self.market))) > 0]
        self.factory = [j for j in self.graph.nodes() if 'C' in self.graph.nodes[j]]
        self.rawmat = [j for j in self.graph.nodes() if len(list(self.graph.predecessors(j))) == 0]
        self.main_nodes = np.sort(self.distrib + self.factory)
        self.reorder_links = [e for e in self.graph.edges() if 'L' in self.graph.edges[e]] #exclude links to markets (these cannot have lead time 'L')
        self.retail_links = [e for e in self.graph.edges() if 'L' not in self.graph.edges[e]] #links joining retailers to markets
        self.network_links = [e for e in self.graph.edges()] #all links involved in sale in the network

        # check inputs
        assert set(self.graph.nodes()) == set.union(set(self.market),
                                                    set(self.distrib),
                                                    set(self.factory),
                                                    set(self.rawmat)), "The union of market, distribution, factory, and raw material nodes is not equal to the system nodes."
        for j in self.graph.nodes():
            if 'I0' in self.graph.nodes[j]:
                assert self.graph.nodes[j]['I0'] >= 0, "The initial inventory cannot be negative for node {}.".format(j)
            if 'h' in self.graph.nodes[j]:
                assert self.graph.nodes[j]['h'] >= 0, "The inventory holding costs cannot be negative for node {}.".format(j)
            if 'C' in self.graph.nodes[j]:
                assert self.graph.nodes[j]['C'] > 0, "The production capacity must be positive for node {}.".format(j)
            if 'o' in self.graph.nodes[j]:
                assert self.graph.nodes[j]['o'] >= 0, "The operating costs cannot be negative for node {}.".format(j)
            if 'v' in self.graph.nodes[j]:
                assert self.graph.nodes[j]['v'] > 0 and self.graph.nodes[j]['v'] <= 1, "The production yield must be in the range (0, 1] for node {}.".format(j)
        for e in self.graph.edges():
            if 'L' in self.graph.edges[e]:
                assert self.graph.edges[e]['L'] >= 0, "The lead time joining nodes {} cannot be negative.".format(e)
            if 'p' in self.graph.edges[e]:
                assert self.graph.edges[e]['p'] >= 0, "The sales price joining nodes {} cannot be negative.".format(e)
            if 'b' in self.graph.edges[e]:
                assert self.graph.edges[e]['b'] >= 0, "The unfulfilled demand costs joining nodes {} cannot be negative.".format(e)
            if 'g' in self.graph.edges[e]:
                assert self.graph.edges[e]['g'] >= 0, "The pipeline inventory holding costs joining nodes {} cannot be negative.".format(e)
            if 'sample_path' in self.graph.edges[e]:
                assert isinstance(self.graph.edges[e]['sample_path'], bool), "When specifying if a user specified demand joining (retailer, market): {} is sampled from a distribution, sample_path must be a Boolean.".format(e)
            if 'demand_dist' in self.graph.edges[e]:
                dist = self.graph.edges[e]['demand_dist'] #extract distribution
                assert dist.cdf(0,**self.graph.edges[e]['dist_param']), "Wrong parameters passed to the demand distribution joining (retailer, market): {}.".format(e)
        assert self.backlog == False or self.backlog == True, "The backlog parameter must be a boolean."
        assert self.graph.number_of_nodes() >= 2, "The minimum number of nodes is 2. Please try again"
        assert self.alpha>0 and self.alpha<=1, "alpha must be in the range (0, 1]."
        
        # set random generation seed (unless using user demands)
        self.seed(self.seed_int)
        
        # action space (reorder quantities for each node for each supplier; list)
        # An action is defined for every node
        num_reorder_links = len(self.reorder_links) 
        self.lt_max = np.max([self.graph.edges[e]['L'] for e in self.graph.edges() if 'L' in self.graph.edges[e]])
        self.init_inv_max = np.max([self.graph.nodes[j]['I0'] for j in self.graph.nodes() if 'I0' in self.graph.nodes[j]])
        self.capacity_max = np.max([self.graph.nodes[j]['C'] for j in self.graph.nodes() if 'C' in self.graph.nodes[j]])
        self.pipeline_length = sum([self.graph.edges[e]['L']
            for e in self.graph.edges() if 'L' in self.graph.edges[e]])
        self.lead_times = {e: self.graph.edges[e]['L'] 
            for e in self.graph.edges() if 'L' in self.graph.edges[e]}
        self.obs_dim = self.pipeline_length + len(self.main_nodes) + len(self.retail_links)
        # self.pipeline_length = len(self.main_nodes)*(self.lt_max+1)
        self.action_space = gym.spaces.Box(
            low=np.zeros(num_reorder_links),
            high=np.ones(num_reorder_links)*(self.init_inv_max + self.capacity_max*self.num_periods), 
            dtype=np.int32)
        # observation space (total inventory at each node, which is any integer value)
        self.observation_space = gym.spaces.Box(
            low=np.ones(self.obs_dim)*np.iinfo(np.int32).min,
            high=np.ones(self.obs_dim)*np.iinfo(np.int32).max,
            dtype=np.int32)
            # low=-np.ones(self.pipeline_length)*(self.init_inv_max + self.capacity_max*self.num_periods)*10,
            # high=np.ones(self.pipeline_length)*(self.init_inv_max + self.capacity_max*self.num_periods), 
            # dtype=np.int32)

        # initialize
        self.reset()
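A minimal sketch of the demand_dist convention described in the docstring above — a zero-argument lambda wrapping a scipy rvs call — shown standalone; the mu value is illustrative.

# Sketch of the demand_dist convention: a lambda calling the rvs method
# of a scipy distribution, so each call draws one random demand.
from scipy.stats import poisson

demand_dist = lambda: poisson.rvs(mu=20)
print(demand_dist())  # e.g. 18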
Example #11
import networkx as nx
import numpy as np
import itertools

## We define each S* motif as a directed graph in networkx
motifs = {
    'S1': nx.DiGraph([(1, 2), (2, 3)]),
    'S2': nx.DiGraph([(1, 2), (1, 3), (2, 3)]),
    'S3': nx.DiGraph([(1, 2), (2, 3), (3, 1)]),
    'S4': nx.DiGraph([(1, 2), (3, 2)]),
    'S5': nx.DiGraph([(1, 2), (1, 3)])
}


def mcounter(gr, mo):
    """Counts motifs in a directed graph

    :param gr: A ``DiGraph`` object
    :param mo: A ``dict`` of motifs to count
    :returns: A ``dict`` with the number of each motifs, with the same keys as ``mo``

    This function is actually rather simple. It will extract all 3-grams from
    the original graph, and look for isomorphisms in the motifs contained
    in a dictionary. The returned object is a ``dict`` with the number of
    times each motif was found.::

        >>> print(mcounter(gr, mo))
        {'S1': 4, 'S3': 0, 'S2': 1, 'S5': 0, 'S4': 3}

    """
    #This function will take each possible subgraphs of gr of size 3, then
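The body above is truncated; a minimal completion sketch, assuming only what the docstring states (enumerate 3-node subsets, keep connected induced subgraphs, test isomorphism against each motif) — not the original implementation:

def mcounter_sketch(gr, mo):
    # Count connected 3-node induced subgraphs of gr isomorphic to each motif.
    counts = dict.fromkeys(mo, 0)
    for triplet in itertools.combinations(gr.nodes(), 3):
        sub = gr.subgraph(triplet)
        if nx.is_weakly_connected(sub):
            for name, motif in mo.items():
                if nx.is_isomorphic(sub, motif):
                    counts[name] += 1
    return counts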
Example #12
def plot_net(
    params: str,
    nrvsys: Union[str, List[str]] = ["Head", "VentralCord"],
    strict_fname: bool = False,
    show_weights: bool = True,
    spring_layout: bool = False,
):
    if params.endswith('/') or (strict_fname
                                and not params.endswith('params.json')):
        params = joinPath(params, 'params.json')

    # figure out which nervous systems to plot
    if isinstance(nrvsys, str):
        nrvsys = nrvsys.split(',')

    # load network
    with open(params, 'r') as fin:
        data = json.load(fin)

    # create network
    G = nx.DiGraph()
    edges_byType: Dict[str, list] = {
        'ele': list(),
        'chem': list(),
    }
    weights_byType: Dict[str, dict] = {
        'ele': dict(),
        'chem': dict(),
    }

    for ns in nrvsys:
        for nrn in data[ns]["neurons"]:
            G.add_node(nrn)

        if ("n_units" in data[ns]) and (int(data[ns]["n_units"]) > 1):
            raise NotImplementedError()
            n_units = int(data[ns]["n_units"])
            for u in range(n_units):
                for conn in data[ns]["connections"]:
                    G.add_edge(conn["from"], conn["to"])

        else:
            for conn in data[ns]["connections"]:
                G.add_edge(conn["from"], conn["to"])
                edges_byType[conn["type"]].append((conn["from"], conn["to"]))
                weights_byType[conn["type"]][(conn["from"],
                                              conn["to"])] = conn["weight"]

    print(G.nodes())
    print(G.edges())

    if spring_layout:
        pos: Dict[str, Tuple[float, float]] = nx.spring_layout(G)
    else:
        pos = DEFAULT_POS

    nx.draw_networkx_nodes(G, pos, node_size=1500, node_color='#E3FFB2')
    nx.draw_networkx_labels(G, pos)
    # draw chem (directed)
    nx.draw_networkx_edges(
        G,
        pos,
        edgelist=edges_byType['chem'],
        edge_color='r',
        arrows=True,
        arrowsize=30,
        connectionstyle='arc3,rad=0.1',
        min_target_margin=20,
    )

    # draw ele (undirected)
    nx.draw_networkx_edges(G,
                           pos,
                           edgelist=edges_byType['ele'],
                           edge_color='b',
                           arrows=False)

    # draw weights
    if show_weights:
        nx.draw_networkx_edge_labels(
            G,
            pos,
            edge_labels=weights_byType['chem'],
        )
        nx.draw_networkx_edge_labels(
            G,
            pos,
            edge_labels=weights_byType['ele'],
        )

    plt.title(params)
    plt.show()
Example #13
def main():

    args = parse_command_line(sys.argv)

    metas = []

    if args.additional_recipes:
        add_additional_recipes(args)

    if not os.path.exists(args.dir):
        print(f"{args.dir} not found. Not generating a pipeline.")

    all_recipes = glob.glob(os.path.join(args.dir, "**", "*.yaml"))
    for f in all_recipes:
        with open(f) as fi:
            metas.append(yaml.safe_load(fi.read()))

    if len(metas) > 1:
        requirements = {}

        for pkg in metas:
            requirements[pkg["package"]["name"]] = (
                pkg["requirements"].get("host", []) + pkg["requirements"].get("run", [])
            )

        # sort out requirements that are not built in this run
        for pkg_name, reqs in requirements.items():
            requirements[pkg_name] = [
                r.split()[0] for r in reqs if (isinstance(r, str) and r.split()[0] in requirements)
            ]
        print(requirements)

        G = nx.DiGraph()
        for pkg, reqs in requirements.items():
            G.add_node(pkg)
            for r in reqs:
                if r.startswith("ros-"):
                    G.add_edge(pkg, r)

        # import matplotlib.pyplot as plt
        # nx.draw(G, with_labels=True, font_weight='bold')
        # plt.show()

        tg = list(reversed(list(nx.topological_sort(G))))

        stages = []
        current_stage = []
        for pkg in tg:
            reqs = requirements.get(pkg, [])
            sort_in_stage = 0
            for r in reqs:
                # sort up the stages, until first stage found where all requirements are fulfilled.
                for sidx, stage in enumerate(stages):
                    if r in stages[sidx]:
                        sort_in_stage = max(sidx + 1, sort_in_stage)

                # if r in current_stage:
                # stages.append(current_stage)
                # current_stage = []
            if sort_in_stage >= len(stages):
                stages.append([pkg])
            else:
                stages[sort_in_stage].append(pkg)
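            # e.g. (hypothetical): with stages == [['ros-a']] and a package
            # 'ros-b' requiring 'ros-a', sort_in_stage resolves to 1, so
            # 'ros-b' starts a new stage and builds only after 'ros-a'.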
            # current_stage.append(pkg)

        if len(current_stage):
            stages.append(current_stage)
    elif len(metas) == 1:
        fn_wo_yaml = os.path.splitext(os.path.basename(all_recipes[0]))[0]
        stages = [[fn_wo_yaml]]
        requirements = [fn_wo_yaml]
    else:
        stages = []
        requirements = []


    # filter out packages that we are not actually building
    filtered_stages = []
    for stage in stages:
        filtered = [pkg for pkg in stage if pkg in requirements]
        if len(filtered):
            filtered_stages.append(filtered)

    stages = batch_stages(filtered_stages)
    print(stages)

    # Build Linux pipeline
    azure_template = {"pool": {"vmImage": "ubuntu-16.04"}}

    azure_stages = []

    stage_names = []
    for i, s in enumerate(stages):
        stage_name = f"stage_{i}"
        stage = {"stage": stage_name, "jobs": []}
        stage_names.append(stage_name)

        for batch in s:
            pkg_jobname = '_'.join([normalize_name(pkg) for pkg in batch])
            stage["jobs"].append(
                {
                    "job": pkg_jobname,
                    "steps": [
                        {
                            "script": azure_linux_script,
                            "env": {
                                "ANACONDA_API_TOKEN": "$(ANACONDA_API_TOKEN)",
                                "CURRENT_RECIPES": f"{' '.join([pkg for pkg in batch])}",
                                "DOCKER_IMAGE": "condaforge/linux-anvil-comp7",
                            },
                            "displayName": f"Build {' '.join([pkg for pkg in batch])}",
                        }
                    ],
                }
            )

        if len(stage["jobs"]) != 0:
            # all packages skipped ...
            azure_stages.append(stage)

    azure_template["trigger"] = [args.trigger_branch]
    azure_template["pr"] = "none"
    if azure_stages:
        azure_template["stages"] = azure_stages

    if args.platform == "linux-64" and len(azure_stages):
        with open("linux.yml", "w") as fo:
            fo.write(yaml.dump(azure_template, sort_keys=False))

    # Build OSX pipeline
    azure_template = {"pool": {"vmImage": "macOS-10.15"}}

    azure_stages = []

    stage_names = []
    for i, s in enumerate(stages):
        stage_name = f"stage_{i}"
        stage = {"stage": stage_name, "jobs": []}
        stage_names.append(stage_name)

        for batch in s:
            pkg_jobname = '_'.join([normalize_name(pkg) for pkg in batch])
            stage["jobs"].append(
                {
                    "job": pkg_jobname,
                    "steps": [
                        {
                            "script": azure_osx_script,
                            "env": {
                                "ANACONDA_API_TOKEN": "$(ANACONDA_API_TOKEN)",
                                "CURRENT_RECIPES": f"{' '.join([pkg for pkg in batch])}"
                            },
                            "displayName": f"Build {' '.join([pkg for pkg in batch])}",
                        }
                    ],
                }
            )

        if len(stage["jobs"]) != 0:
            # all packages skipped ...
            azure_stages.append(stage)

    azure_template["trigger"] = [args.trigger_branch]
    azure_template["pr"] = "none"
    if azure_stages:
        azure_template["stages"] = azure_stages

    if args.platform == "osx-64" and len(azure_stages):
        with open("osx.yml", "w") as fo:
            fo.write(yaml.dump(azure_template, sort_keys=False))

    # Build OSX-arm64 pipeline
    azure_template = {"pool": {"vmImage": "macOS-10.15"}}

    azure_stages = []

    stage_names = []
    for i, s in enumerate(stages):
        stage_name = f"stage_{i}"
        stage = {"stage": stage_name, "jobs": []}
        stage_names.append(stage_name)

        for batch in s:
            pkg_jobname = '_'.join([normalize_name(pkg) for pkg in batch])
            stage["jobs"].append(
                {
                    "job": pkg_jobname,
                    "steps": [
                        {
                            "script": azure_osx_arm64_script,
                            "env": {
                                "ANACONDA_API_TOKEN": "$(ANACONDA_API_TOKEN)",
                                "CURRENT_RECIPES": f"{' '.join([pkg for pkg in batch])}"
                            },
                            "displayName": f"Build {' '.join([pkg for pkg in batch])}",
                        }
                    ],
                }
            )

        if len(stage["jobs"]) != 0:
            # all packages skipped ...
            azure_stages.append(stage)

    azure_template["trigger"] = [args.trigger_branch]
    azure_template["pr"] = "none"
    if azure_stages:
        azure_template["stages"] = azure_stages

    if args.platform == "osx-arm64" and len(azure_stages):
        with open("osx_arm64.yml", "w") as fo:
            fo.write(yaml.dump(azure_template, sort_keys=False))

    # Build aarch64 pipeline
    azure_template = {
        "pool": {
            "name": "Default",
            "demands": ["Agent.OS -equals linux", "Agent.OSArchitecture -equals ARM64"],
        }
    }

    azure_stages = []

    stage_names = []
    for i, s in enumerate(stages):
        stage_name = f"stage_{i}"
        stage = {"stage": stage_name, "jobs": []}
        stage_names.append(stage_name)

        for batch in s:
            pkg_jobname = '_'.join([normalize_name(pkg) for pkg in batch])
            stage["jobs"].append(
                {
                    "job": pkg_jobname,
                    "steps": [
                        {
                            "script": azure_linux_script,
                            "env": {
                                "ANACONDA_API_TOKEN": "$(ANACONDA_API_TOKEN)",
                                "CURRENT_RECIPES": f"{' '.join([pkg for pkg in batch])}",
                                "DOCKER_IMAGE": "condaforge/linux-anvil-aarch64",
                            },
                            "displayName": f"Build {' '.join([pkg for pkg in batch])}",
                        }
                    ],
                }
            )

        if len(stage["jobs"]) != 0:
            # all packages skipped ...
            azure_stages.append(stage)

    azure_template["trigger"] = [args.trigger_branch]
    azure_template["pr"] = "none"
    if azure_stages:
        azure_template["stages"] = azure_stages

    if args.platform == "linux-aarch64" and len(azure_stages):
        with open("linux_aarch64.yml", "w") as fo:
            fo.write(yaml.dump(azure_template, sort_keys=False))

    # windows
    azure_template = {"pool": {"vmImage": "vs2017-win2016"}}

    azure_stages = []

    global azure_win_script
    if os.path.exists(".scripts/build_win.bat"):
        with open(".scripts/build_win.bat", "r") as fi:
            azure_win_script = literal_unicode(fi.read())

    stage_names = []
    for i, s in enumerate(stages):
        stage_name = f"stage_{i}"
        stage = {"stage": stage_name, "jobs": []}
        stage_names.append(stage_name)

        for batch in s:
            pkg_jobname = '_'.join([normalize_name(pkg) for pkg in batch])
            stage["jobs"].append(
                {
                    "job": pkg_jobname,
                    "variables": {"CONDA_BLD_PATH": "C:\\\\bld\\\\"},
                    "steps": [
                        {
                            "powershell": 'Write-Host "##vso[task.prependpath]$env:CONDA\\Scripts"',
                            "displayName": "Add conda to PATH"
                        },
                        {
                            "script": 'conda install -c conda-forge --yes --quiet conda-build pip mamba ruamel.yaml anaconda-client',
                            "displayName": "Install conda-build, boa and activate environment"
                        },
                        {
                            "script": azure_win_preconfig_script,
                            "displayName": "conda-forge build setup",
                        },
                        {
                            "script": azure_win_script,
                            "env": {
                                "ANACONDA_API_TOKEN": "$(ANACONDA_API_TOKEN)",
                                "CURRENT_RECIPES": f"{' '.join([pkg for pkg in batch])}",
                                "PYTHONUNBUFFERED": 1,
                            },
                            "displayName": f"Build {' '.join([pkg for pkg in batch])}",
                        },
                    ],
                }
            )

        if len(stage["jobs"]) != 0:
            # all packages skipped ...
            azure_stages.append(stage)

    azure_template["trigger"] = [args.trigger_branch]
    azure_template["pr"] = "none"
    if azure_stages:
        azure_template["stages"] = azure_stages

    if args.platform.startswith("win") and len(azure_stages):
        with open("win.yml", "w") as fo:
            fo.write(yaml.dump(azure_template, sort_keys=False))
Example #14
                        u = list(G.out_edges(w))[0][1]
                        non_branching_path.append(u)
                        w = u
                    paths.append(non_branching_path)
    
    for cycle in nx.simple_cycles(G):
        branch = 0
        for v in cycle:
            if G.in_degree(v) != 1 or G.out_degree(v) != 1:
                branch = 1
        if branch == 0:
            cycle.append(cycle[0])
            paths.append(cycle)
    
    return paths
    
edges = []
for line in open('rosalind_ba3m.txt'):
    a, b = line.rstrip().split(' -> ')
    a = int(a)
    b = [int(i) for i in b.split(',')]
    for i in b:
        edges.append((a, i))

G = nx.DiGraph(edges)  # pass edges positionally; data= would become a graph attribute in networkx 2.x
result = '\n'.join([' -> '.join([str(i) for i in j]) for j in MaximalNonBranchingPaths(G)])
print(result)
open('rosalind_ba3m_sub.txt', 'wt').write(result)


Example #15
def split_by_synapse_domain(bandwidth, locations, arbors, treenode_connector,
                            minis) -> Tuple[Dict, Any]:
    """ locations: dictionary of treenode ID vs tuple with x,y,z
        arbors: dictionary of skeleton ID vs list of DiGraph (that were, or not, split by confidence)
        treenode_connector: dictionary of treenode ID vs list of (connector_id, relation) tuples, where relation is 'presynaptic_to' or 'postsynaptic_to'
    """
    arbors2: Dict = {}
    # Some arbors will be split further
    for skeleton_id, graphs in arbors.items():
        subdomains: List = []
        arbors2[skeleton_id] = subdomains
        for graph in graphs:
            treenode_ids = []
            connector_ids = []
            relation_ids = []
            for treenode_id in filter(treenode_connector.__contains__,
                                      graph.nodes):
                for c in treenode_connector.get(treenode_id):
                    connector_id, relation = c
                    treenode_ids.append(treenode_id)
                    connector_ids.append(connector_id)
                    relation_ids.append(relation)

            if not connector_ids:
                subdomains.append(graph)
                continue

            for parent_id, treenode_id in graph.edges:
                loc0 = locations[treenode_id]
                loc1 = locations[parent_id]
                graph[parent_id][treenode_id]['weight'] = norm(
                    subtract(loc0, loc1))

            # Invoke Casey's magic
            max_density = tree_max_density(graph.to_undirected(), treenode_ids,
                                           connector_ids, relation_ids,
                                           [bandwidth])
            synapse_group = next(iter(max_density.values()))
            # The list of nodes of each synapse_group contains only nodes that have connectors
            # A local_max is the skeleton node most central to a synapse_group
            anchors = {}
            for domain in synapse_group.values():
                g = nx.DiGraph()
                g.add_nodes_from(
                    domain.node_ids
                )  # bogus graph, containing treenodes that point to connectors
                subdomains.append(g)
                anchors[domain.local_max] = g
            # Define edges between domains: create a simplified graph
            mini = simplify(graph, anchors.keys())
            # Replace each node by the corresponding graph, or a graph of a single node
            for node in mini.nodes:
                g = anchors.get(node)
                if not g:
                    # A branch node that was not an anchor, i.e. did not represent a synapse group
                    g = nx.Graph()
                    g.add_node(node, **{'branch': True})
                    subdomains.append(g)
                # Associate the Graph with treenodes that have connectors
                # with the node in the minified tree
                mini.nodes[node]['g'] = g
            # Put the mini into a map of skeleton_id and list of minis,
            # to be used later for defining intra-neuron edges in the circuit graph
            minis[skeleton_id].append(mini)

    return arbors2, minis
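A hypothetical illustration (not from the source) of the input shapes the split_by_synapse_domain docstring describes; all IDs are made up.

# locations:          treenode ID -> (x, y, z)
locations = {101: (0.0, 0.0, 0.0), 102: (120.5, 3.0, 9.0)}
# arbors:             skeleton ID -> list of DiGraph components
arbors = {55: [nx.DiGraph([(101, 102)])]}
# treenode_connector: treenode ID -> list of (connector_id, relation) tuples
treenode_connector = {102: [(900, 'presynaptic_to')]}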
Example #16
 def __init__(self):
     self.components = []
     self.G = nx.DiGraph()
Example #17
# remove descendant time series (i.e. time series that are led)
def removedes(order,graph,lead_id):
    for cur_ts in order:
        if order.index(cur_ts) > order.index(lead_id):  # time series ordered after lead_id
            if lead_id in graph[cur_ts].keys():
                removedes(order,graph,cur_ts)
                order.remove(cur_ts)

d = datetime(2020, 7, 27) 
leader_order={}
thres_range=np.arange(0.05, 0.45, 0.01) #threshold range
for threshold in thres_range:
    leader_order[threshold]=[]
    for t in range(1,9,1):  # step back through the previous 9 time points
        tmp_d=add_months(d,t)
        DG = nx.DiGraph() #B->A, A is leader
        mycursor1.execute("select * from AgCorrelation_week_ws12 where windowSize=12 and fir_catid2 is not NULL and fir_catid3 is NULL and agCorrelation>"+str(threshold)+" and endTime BETWEEN '"+(tmp_d-timedelta(days=1)).strftime("%Y-%m-%d")+" 12:00:00' AND '"+tmp_d.strftime("%Y-%m-%d")+" 23:30:00';")
        for i in mycursor1:
            beLed=0
            lead=0
            mycursor2.execute("select * from category where (fir_catid="+str(i[19])+" and sec_catid="+str(i[20])+") and thr_catid is NULL or (fir_catid="+str(i[16])+" and sec_catid="+str(i[17])+") and thr_catid is NULL;")
            for j in mycursor2:
                if j[1]==i[19] and j[3]==i[20]:
                    beLed=j[0]
                elif j[1]==i[16] and j[3]==i[17]:
                    lead=j[0]
            DG.add_weighted_edges_from([(beLed,lead,i[13])])

        # compute the PageRank score of this directed weighted graph
        pr=nx.pagerank(DG)
        for p in pr:
Example #18
 def test_closeness_vitality_unweighted_digraph(self):
     G=nx.DiGraph()
     nx.add_cycle(G, [0, 1, 2])
     v=nx.closeness_vitality(G)
     assert_equal(v,{0:8.0, 1:8.0, 2:8.0})
Example #19
import requests
import sys
reload(sys)
sys.setdefaultencoding("utf-8")

dictt = {}
corpora_documents = []
st1 = "edges.txt"
st3 = "rank_nodes_pagerank_without_weight.txt"
st4 = "inverse_rankings.txt"
st5 = "time_diff.txt"
st6 = "ans_count.txt"
st7 = "nlp_score.txt"

nooffeatures = 12
directed_graph = nx.DiGraph()
pagerank_dict = {}
leader_fol_dict = {}
bw_centrality = {}
accepted_answer_id = {}
time_diff_of_accepted = {}
nlp_dict = {}
degree = {}
degsu = 1

graph_file = open(st1)
for line in graph_file:
    try:
        v1 = int(line.split(" ")[0])
        v2 = int(line.split(" ")[1])
        directed_graph.add_edge(v1, v2)
Example #20
 def test_closeness_vitality_weighted_digraph(self):
     G=nx.DiGraph()
     nx.add_cycle(G, [0, 1, 2], weight=2)
     v=nx.closeness_vitality(G,weight='weight')
     assert_equal(v,{0:16.0, 1:16.0, 2:16.0})
Example #21
    def opt(self, file1, file2):
        f1 = open(file1, encoding="utf8")
        lines = f1.readlines()
        nodes = self.getegdes(lines[0])
        edges = self.getegdes(lines[1])
        data = pd.read_csv(file2)

        G = BayesianModel()
        G.add_nodes_from(nodes)
        for i in range(int(len(edges) / 2)):
            G.add_edge(edges[2 * i], edges[2 * i + 1])
        # nx.draw(G)
        # plt.show()
        k2 = K2Score(data).score(G)
        bic = BicScore(data).score(G)
        bdeu = BDeuScore(data).score(G)
        print(k2, ",", bic, ",", bdeu)

        est = HillClimbSearch(data, scoring_method=K2Score(data))
        model = est.estimate()
        model_edges = model.edges()
        G_ = nx.DiGraph()
        G_.add_edges_from(model_edges)
        G_copy = nx.DiGraph()
        G_copy.add_edges_from(G.edges)
        add = []
        add_mut = []
        delete = []
        delete_mut = []
        # a = list(G.edges._adjdict.key())
        for edge in model_edges:
            node1 = edge[0]
            node2 = edge[1]
            if not nx.has_path(G, node2, node1):
                if not G.has_edge(node1, node2):
                    this = (node1, node2)
                    # this = '('+node1+','+node2+')'
                    add.append(this)
                    x = data[node1]
                    mut = mr.mutual_info_score(data[node1], data[node2])
                    add_mut.append(mut)
        seq = list(zip(add_mut, add))
        seq = sorted(seq, key=lambda s: s[0], reverse=True)
        alpha = 0.015
        # if seq[0][0] > alpha:
        #     add = seq[0:1]

        add = seq[0:1]

        data_edges = []
        for edge in G.edges:
            node1 = edge[0]
            node2 = edge[1]
            mut = mr.mutual_info_score(data[node1], data[node2])
            delete_mut.append(mut)
            data_edges.append(edge)
            # if not (nx.has_path(G_, node1, node2) or nx.has_path(G_, node2, node1)):
            #     this = '('+node1+','+node2+')'
            #     delete.append(this)
        seq = list(zip(delete_mut, data_edges))
        seq = sorted(seq, key=lambda s: s[0])

        # if seq[0][0] < alpha:
        #     delete = seq[0:1]
        if len(edges) > 2:
            delete = seq[0:1]
            if len(add) > 0:
                if delete[0][0] > add[0][0]:
                    delete = []

        print('add')
        for i in add:
            print(str(i[1]) + "," + str(i[0]))

        print('delete')
        for j in delete:
            print(str(j[1]) + "," + str(j[0]))
            # print(j[0])

        print('cpt')
        estimator = BayesianEstimator(G, data)
        for i in G.nodes:
            cpd = estimator.estimate_cpd(i, prior_type="K2")
            nodeName = i
            values = dict(data[i].value_counts())
            valueNum = len(values)
            CPT = np.transpose(cpd.values)
            # CPT = cpd.values
            sequence = cpd.variables[1::]
            card = []
            for x in sequence:
                s = len(dict(data[x].value_counts()))
                card.append(s)
            output = nodeName + '\t' + str(valueNum) + '\t' + str(
                CPT.tolist()) + '\t' + str(sequence) + '\t' + str(card)
            print(output)

        print('mutual')
        output1 = []
        for i in range(int(len(edges) / 2)):
            mut = mr.mutual_info_score(data[edges[2 * i]],
                                       data[edges[2 * i + 1]])
            output1.append(mut)
        output2 = {}
        for node1 in G.nodes():
            d = {}
            for node2 in G.nodes():
                if node1 == node2:
                    continue
                mut = mr.mutual_info_score(data[node1], data[node2])

                d[node2] = mut
            output2[node1] = d
        print(output1)
        print(output2)
Example #22
                if t not in time_edge:
                    time_edge[t] = []
                time_edge[t].append((u, v))

    return graph, time_edge

production_rules = {}

G, T = load_koblenz_quad("../demo_graphs/haggle_contact_koblenz.txt")

print "Source Graph nodes: %d" % G.number_of_nodes()
print "Source Graph edges: %d" % G.number_of_edges()

N = []
GT = nx.DiGraph()
R = []
seen = set()
rhs_dict = {}

uncompressed_rule = []
created_in_rule = {}


def insert_rule(lhs, rhs):
    nodes = lhs.split(",")
    outer_verts = {}
    inner_verts = {}
    i = 0
    if 'S' in lhs:
        outer_verts['S'] = 'S'
Example #23
q = 0.5  # BSM success probability

Aff = []  #Variables for the LP objective function
vars = []  #LP variables

G = graph_read('Surfnet.graphml.xml')  #Reading the graph
n = len(list(G.nodes()))
#Seed for generating random numbers
seed(85)

for (i, j) in G.edges():
    Arcs.append((i, j, list(G.edges[i, j].values())[0]))
max_len = 10
(Nodes_mod, Arcs_mod) = Mod_net(G, max_len)  #Create the modified network
N = len(Nodes_mod)  #Total number of nodes in the modified network
G_mod = nx.DiGraph()  #Modified network
G_mod.add_nodes_from(Nodes_mod)
G_mod.add_weighted_edges_from(Arcs_mod)

# Demand Creation
for i in range(dem):
    s = 1
    t = 1
    l = randint(min_len, max_len)
    while s == t:
        s = randint(1, n)
        t = randint(1, n)
        D_acc.append((s, t))
    for k in range(l):
        D.append(
            ((s - 1) * (max_len + 1) + 1, (t - 1) * (max_len + 1) + k + 2))
Example #24
import networkx as nx
import matplotlib.pyplot as plt
import string
import json
import glob
import os

G = nx.DiGraph()  #our graph

filenames = glob.glob("data/*.json")  #getting all files from data


def add_node(name):
    G.add_node(f"r/{name}")


def add_edge(name_1, name_2, weight):
    G.add_edge(f"r/{name_1}", f"r/{name_2}", weight=weight)


def add_node_and_children(filename):
    base_name = os.path.basename(filename)
    base_node_name = os.path.splitext(base_name)[0]

    if not G.has_node(base_node_name):
        add_node(base_node_name)

    with open(filename, 'r') as f:
        child_nodes = json.load(f)
    for node_name, weight in child_nodes:
Example #25
def create_graph():
    g = nx.DiGraph()
    g.add_node(ROOT, child_count=-1, meta=[])

    return g
Example #26
def to_networkx(data, node_attrs=None, edge_attrs=None,
                to_undirected: Union[bool, str] = False,
                remove_self_loops: bool = False):
    r"""Converts a :class:`torch_geometric.data.Data` instance to a
    :obj:`networkx.Graph` if :attr:`to_undirected` is set to :obj:`True`, or
    a directed :obj:`networkx.DiGraph` otherwise.

    Args:
        data (torch_geometric.data.Data): The data object.
        node_attrs (iterable of str, optional): The node attributes to be
            copied. (default: :obj:`None`)
        edge_attrs (iterable of str, optional): The edge attributes to be
            copied. (default: :obj:`None`)
        to_undirected (bool or str, optional): If set to :obj:`True` or
            "upper", will return a :obj:`networkx.Graph` instead of a
            :obj:`networkx.DiGraph`. The undirected graph will correspond to
            the upper triangle of the corresponding adjacency matrix.
            Similarly, if set to "lower", the undirected graph will correspond
            to the lower triangle of the adjacency matrix. (default:
            :obj:`False`)
        remove_self_loops (bool, optional): If set to :obj:`True`, will not
            include self loops in the resulting graph. (default: :obj:`False`)
    """
    import networkx as nx

    if to_undirected:
        G = nx.Graph()
    else:
        G = nx.DiGraph()

    G.add_nodes_from(range(data.num_nodes))

    node_attrs, edge_attrs = node_attrs or [], edge_attrs or []

    values = {}
    for key, value in data(*(node_attrs + edge_attrs)):
        if torch.is_tensor(value):
            value = value if value.dim() <= 1 else value.squeeze(-1)
            values[key] = value.tolist()
        else:
            values[key] = value

    to_undirected = "upper" if to_undirected is True else to_undirected
    to_undirected_upper = True if to_undirected == "upper" else False
    to_undirected_lower = True if to_undirected == "lower" else False

    for i, (u, v) in enumerate(data.edge_index.t().tolist()):

        if to_undirected_upper and u > v:
            continue
        elif to_undirected_lower and u < v:
            continue

        if remove_self_loops and u == v:
            continue

        G.add_edge(u, v)

        for key in edge_attrs:
            G[u][v][key] = values[key][i]

    for key in node_attrs:
        for i, feat_dict in G.nodes(data=True):
            feat_dict.update({key: values[key][i]})

    return G
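A minimal usage sketch for to_networkx, assuming torch and torch_geometric are installed; the tensors are made up for illustration:

import torch
from torch_geometric.data import Data

edge_index = torch.tensor([[0, 1, 1], [1, 0, 2]])  # edges 0->1, 1->0, 1->2
x = torch.tensor([[1.0], [2.0], [3.0]])            # one scalar feature per node
data = Data(x=x, edge_index=edge_index)

G = to_networkx(data, node_attrs=['x'], to_undirected='upper')
print(G.nodes(data=True))  # features copied onto nodes; 0->1 and 1->0 merged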
Example #27
    """
    G = nx.DiGraph()
    for func in func_list:
        G.add_node(func.head.ident.name)
    for func in func_list:
        assert isinstance(func,node.function)
        func_name = func.head.ident.name
        resolve.resolve(func)
        for s in node.postorder(func):
            if (s.__class__ is node.funcall and
                s.func_expr.__class__ is  node.ident and
                s.func_expr.name in G.nodes()):
                G.add_edge(func_name,s.func_expr.name)
    return G

G = nx.DiGraph()

def postorder_edge(u):
    if isinstance(u, node.node):
        for v in u:
            for t in postorder_edge(v):
                yield (v, t)
        yield (u, u)  # returns only traversible objects

def foo(tree):
    G = nx.DiGraph()
    for u, v in postorder_edge(tree):
        G.add_edge(id(u), id(v))
    return G

def main():
    pass  # body truncated in the source
Example #28
# Excerpt from a CATMAID (Django) module. Besides the imports below, it relies
# on project helpers defined elsewhere: get_treenodes_to_reviews,
# get_relation_to_id_map, split_by_confidence_and_add_edges,
# split_by_synapse_domain, spanning_tree, cable_length, Counts and
# _node_centrality_by_synapse.
import logging
from collections import defaultdict
from functools import partial
from itertools import chain
from math import sqrt
from typing import Any, DefaultDict, Dict, List, Union

import networkx as nx
from django.db import connection


def _skeleton_graph(project_id,
                    skeleton_ids,
                    confidence_threshold,
                    bandwidth,
                    expand,
                    compute_risk,
                    cable_spread,
                    path_confluence,
                    pre_rel='presynaptic_to',
                    post_rel='postsynaptic_to') -> nx.DiGraph:
    """ Assumes all skeleton_ids belong to project_id. """
    skeletons_string = ",".join(str(int(x)) for x in skeleton_ids)
    cursor = connection.cursor()

    # Fetch all treenodes of all skeletons
    cursor.execute('''
    SELECT id, parent_id, confidence, skeleton_id,
           location_x, location_y, location_z
    FROM treenode
    WHERE skeleton_id IN (%s)
    ''' % skeletons_string)
    rows = tuple(cursor.fetchall())
    # Each skeleton is represented with a DiGraph
    arbors: Union[DefaultDict[Any, nx.DiGraph],
                  Dict[Any, nx.DiGraph]] = defaultdict(nx.DiGraph)

    # Get reviewers for the requested skeletons
    reviews = get_treenodes_to_reviews(skeleton_ids=skeleton_ids)

    # Create a DiGraph for every skeleton
    for row in rows:
        arbors[row[3]].add_node(row[0],
                                **{'reviewer_ids': reviews.get(row[0], [])})

    # Dictionary of skeleton IDs vs list of DiGraph instances
    arbors = split_by_confidence_and_add_edges(confidence_threshold, arbors,
                                               rows)

    # Fetch all synapses
    relations = get_relation_to_id_map(project_id, cursor=cursor)
    cursor.execute('''
    SELECT connector_id, relation_id, treenode_id, skeleton_id
    FROM treenode_connector
    WHERE skeleton_id IN (%s)
      AND (relation_id = %s OR relation_id = %s)
    ''' % (skeletons_string, relations[pre_rel], relations[post_rel]))
    connectors: DefaultDict = defaultdict(partial(defaultdict, list))
    skeleton_synapses: DefaultDict = defaultdict(partial(defaultdict, list))
    for row in cursor.fetchall():
        connectors[row[0]][row[1]].append((row[2], row[3]))
        skeleton_synapses[row[3]][row[1]].append(row[2])

    # Cluster by synapses
    minis: DefaultDict[Any, List] = defaultdict(
        list)  # skeleton_id vs list of minified graphs
    locations = None
    whole_arbors = arbors
    if expand and bandwidth > 0:
        locations = {row[0]: (row[4], row[5], row[6]) for row in rows}
        treenode_connector: DefaultDict[Any, List] = defaultdict(list)
        for connector_id, pp in connectors.items():
            for treenode_id in chain.from_iterable(pp[relations[pre_rel]]):
                treenode_connector[treenode_id].append((connector_id, pre_rel))
            for treenode_id in chain.from_iterable(pp[relations[post_rel]]):
                treenode_connector[treenode_id].append(
                    (connector_id, post_rel))
        arbors_to_expand = {
            skid: ls
            for skid, ls in arbors.items() if skid in expand
        }
        expanded_arbors, minis = split_by_synapse_domain(
            bandwidth, locations, arbors_to_expand, treenode_connector, minis)
        arbors.update(expanded_arbors)

    # Obtain neuron names
    cursor.execute('''
    SELECT cici.class_instance_a, ci.name
    FROM class_instance ci,
         class_instance_class_instance cici
    WHERE cici.class_instance_a IN (%s)
      AND cici.class_instance_b = ci.id
      AND cici.relation_id = %s
    ''' % (skeletons_string, relations['model_of']))
    names = dict(cursor.fetchall())

    # A DiGraph representing the connections between the arbors (every node is an arbor)
    circuit = nx.DiGraph()

    for skid, digraphs in arbors.items():
        base_label = names[skid]
        tag = len(digraphs) > 1
        i = 0
        for g in digraphs:
            if g.number_of_nodes() == 0:
                continue
            if tag:
                label = "%s [%s]" % (base_label, i + 1)
            else:
                label = base_label
            circuit.add_node(g, **{
                'id': "%s_%s" % (skid, i + 1),
                'label': label,
                'skeleton_id': skid,
                'node_count': len(g),
                # TODO when bandwidth > 0, not all nodes are included. They
                # will be included when the bandwidth is computed with an O(n)
                # algorithm rather than the current O(n^2)
                'node_reviewed_count': sum(
                    1 for v in g.nodes.values()
                    if 0 != len(v.get('reviewer_ids', []))),
                'branch': False,
            })
            i += 1

    # Define edges between arbors, with number of synapses as an edge property
    for c in connectors.values():
        for pre_treenode, pre_skeleton in c[relations[pre_rel]]:
            for pre_arbor in arbors.get(pre_skeleton, ()):
                if pre_treenode in pre_arbor:
                    # Found the DiGraph representing an arbor derived from the skeleton to which the presynaptic treenode belongs.
                    for post_treenode, post_skeleton in c[relations[post_rel]]:
                        for post_arbor in arbors.get(post_skeleton, ()):
                            if post_treenode in post_arbor:
                                # Found the DiGraph representing an arbor derived from the skeleton to which the postsynaptic treenode belongs.
                                edge_props = circuit.get_edge_data(
                                    pre_arbor, post_arbor)
                                if edge_props:
                                    edge_props['c'] += 1
                                    edge_props['pre_treenodes'].append(
                                        pre_treenode)
                                    edge_props['post_treenodes'].append(
                                        post_treenode)
                                else:
                                    circuit.add_edge(
                                        pre_arbor, post_arbor, **{
                                            'c': 1,
                                            'pre_treenodes': [pre_treenode],
                                            'post_treenodes': [post_treenode],
                                            'arrow': 'triangle',
                                            'directed': True,
                                        })
                                break
                    break

    if compute_risk and bandwidth <= 0:
        # Compute synapse risk:
        # Compute synapse centrality of every node in every arbor that has synapses
        for skeleton_id, arbors in whole_arbors.items():
            synapses = skeleton_synapses[skeleton_id]
            pre = synapses[relations[pre_rel]]
            post = synapses[relations[post_rel]]
            for arbor in arbors:
                # The subset of synapses that belong to the fraction of the original arbor
                pre_sub = tuple(treenodeID for treenodeID in pre
                                if treenodeID in arbor)
                post_sub = tuple(treenodeID for treenodeID in post
                                 if treenodeID in arbor)

                totalInputs = len(pre_sub)
                totalOutputs = len(post_sub)
                tc = {treenodeID: Counts() for treenodeID in arbor}

                for treenodeID in pre_sub:
                    tc[treenodeID].outputs += 1

                for treenodeID in post_sub:
                    tc[treenodeID].inputs += 1

                # Update the nPossibleIOPaths field in the Counts instance of each treenode
                _node_centrality_by_synapse(arbor, tc, totalOutputs,
                                            totalInputs)

                arbor.treenode_synapse_counts = tc

        if not locations:
            locations = {row[0]: (row[4], row[5], row[6]) for row in rows}

        # Estimate the risk factor of the edge between two arbors,
        # as a function of the number of synapses and their location within the arbor.
        # Algorithm by Casey Schneider-Mizell
        # Implemented by Albert Cardona
        for pre_arbor, post_arbor, edge_props in circuit.edges(data=True):
            if pre_arbor == post_arbor:
                # Signal autapse
                edge_props['risk'] = -2
                continue

            try:
                spanning = spanning_tree(post_arbor,
                                         edge_props['post_treenodes'])
                # for arbor in whole_arbors[circuit[post_arbor]['skeleton_id']]:
                #     if post_arbor == arbor:
                #         tc = arbor.treenode_synapse_counts
                tc = post_arbor.treenode_synapse_counts
                count = spanning.number_of_nodes()
                if count < 3:
                    # with fewer than three nodes, fall back to the mean
                    median_synapse_centrality = sum(
                        tc[treenodeID].synapse_centrality
                        for treenodeID in spanning.nodes) / count
                else:
                    median_synapse_centrality = sorted(
                        tc[treenodeID].synapse_centrality
                        for treenodeID in spanning.nodes)[count // 2]
                cable = cable_length(spanning, locations)
                if -1 == median_synapse_centrality:
                    # Signal not computable
                    edge_props['risk'] = -1
                else:
                    edge_props['risk'] = 1.0 / sqrt(
                        pow(cable / cable_spread, 2) +
                        pow(median_synapse_centrality / path_confluence, 2)
                    )  # NOTE: should subtract 1 from median_synapse_centrality, but not doing it here to avoid potential divisions by zero
            except Exception as e:
                logging.getLogger(__name__).error(e)
                # Signal error when computing
                edge_props['risk'] = -3

    if expand and bandwidth > 0:
        # Add edges between circuit nodes that represent different domains of the same neuron
        for skeleton_id, list_mini in minis.items():
            for mini in list_mini:
                for node in mini.nodes:
                    g = mini.nodes[node]['g']
                    if 1 == len(g) and next(
                            iter(g.nodes(data=True)))[1].get('branch'):
                        # A branch node that was preserved in the minified arbor
                        circuit.add_node(
                            g,
                            **{
                                'id': '%s-%s' % (skeleton_id, node),
                                'skeleton_id': skeleton_id,
                                'label':
                                "",  # "%s [%s]" % (names[skeleton_id], node),
                                'node_count': 1,
                                'branch': True,
                            })
                for node1, node2 in mini.edges:
                    g1 = mini.nodes[node1]['g']
                    g2 = mini.nodes[node2]['g']
                    circuit.add_edge(
                        g1, g2, **{
                            'c': 10,
                            'arrow': 'none',
                            'directed': False
                        })

    return circuit
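For reference, the risk estimate assigned above reduces to a single formula; a standalone sketch of just that computation (the function name is chosen here for illustration):

from math import sqrt

def edge_risk(cable, median_synapse_centrality, cable_spread, path_confluence):
    # risk = 1 / sqrt((cable / cable_spread)^2
    #                 + (median_synapse_centrality / path_confluence)^2)
    return 1.0 / sqrt((cable / cable_spread) ** 2 +
                      (median_synapse_centrality / path_confluence) ** 2)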
Example #29
from itertools import groupby

import matplotlib.pyplot as plt
import networkx as nx


def visualizeGraphList0(EW):
    Edges = EW[0]
    Weight = EW[1]
    # sorting edges first within each edge and then according to the first element.
    Edges_sorted = sorted([ sorted(x, key = lambda s: s[0]) for x in Edges],\
                           key = lambda t: t[0])
    #print(Edges_sorted)
    # making a list of new edges with nodes being matrices;
    # [new edge, label, concatenated nodes of new edge, original edge];
    Edges_labeled = [[[x[0][0]+str(x[0][1]), x[1][0]+str(x[1][1])] ,\
                      '[' + x[0][2]+str(x[0][3])+':'+ x[1][2]+str(x[1][3])+']' ,\
           x[0][0]+str(x[0][1]) +x[1][0]+str(x[1][1]) , x] for x in Edges_sorted]
    #print(Edges_labeled)

    # sorting w.r.t. the concatenated nodes of the new edges.
    ### not sure if we really need this because it may not change anything.
    Edges_new = sorted(Edges_labeled, key=lambda t: t[2])
    #print(Edges_new)
    # if a node starts with '@', restore the original name, because '@'
    # endpoints are kept separate rather than merged together.
    for x in Edges_new:
        for i in [0, 1]:
            if x[0][i][0] == '@':
                x[0][i] += x[3][i][2] + str(x[3][i][3])
    #print(Edges_new)
    # adding auxiliary node names for drawing in case of self loops.
    for x in Edges_new:
        if x[0][0] == x[0][1]:
            x[0][0] += ' Loop'

    # putting double edges together by grouping only w.r.t. new edges of string-format.
    # [[new edge, [[connection],[connection],,,]...],]
    Compressed = [[key, [a[1] for a in group]]
                  for key, group in groupby(Edges_new, lambda x: x[0])]
    # making a new list of information for each edge because double-edges were compressed.
    # [[new edge, '[connection][connection]...'],]
    Compressed2 = []
    for x in Compressed:
        # concatenating all connections between the same two matrices.
        Compressed2.append([x[0], ''.join(x[1])])

    # generating a directed graph from the above new edges and setting the color to be red.
    H = nx.DiGraph()
    for x in Compressed2:
        H.add_edge(x[0][0], x[0][1], name=x[1], color='r')
    # putting that color information into the list.
    Colors_Edges = [H[u][v]['color'] for u, v in H.edges()]

    # defining colors of nodes.
    Color_nodes = []
    for g in H:
        if g[-4:] == 'Loop':
            Color_nodes.append('orange')
        else:
            Color_nodes.append('yellow')

    # drawing.
    pos = nx.circular_layout(H)
    nx.draw(H,
            pos,
            with_labels=True,
            node_color=Color_nodes,
            edge_color=Colors_Edges)
    nx.draw_networkx_labels(H, pos)
    edge_labels = nx.get_edge_attributes(H, 'name')
    nx.draw_networkx_edge_labels(H, pos, edge_labels)
    plt.show()
    print(Weight)
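A minimal usage sketch with made-up data; the endpoint format (matrix name, matrix index, leg name, leg index) is inferred from the indexing in the function above:

Edges = [
    [('A', 1, 'i', 0), ('B', 2, 'j', 1)],
    [('B', 2, 'k', 0), ('B', 2, 'l', 1)],  # self loop, drawn via a ' Loop' node
]
visualizeGraphList0([Edges, 1.0])  # draws the graph and prints the weight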
Example #30
 def test_reverse3(self):
     H = nx.DiGraph()
     H.add_nodes_from([1, 2, 3, 4])
     HR = H.reverse()
     assert sorted(HR.nodes()) == [1, 2, 3, 4]