def fluctuability(netin, calc='global'):
    """
    Fluctuability of temporal networks. This is the variation of the network's edges over time,
    quantified as the number of unique edges through time divided by the overall number of edges.

    Parameters
    ----------

    netin : array or dict
        Temporal network input (graphlet or contact) (nettype: 'bd', 'bu', 'wu', 'wd')

    calc : str
        Version of fluctuability to calculate. 'global'

    Returns
    -------

    fluct : array
        Fluctuability

    """
    # Get input type (C or G)
    netin, netinfo = utils.process_input(netin, ['C', 'G', 'TO'])
    netin[netin != 0] = 1
    unique_edges = np.sum(netin, axis=2)
    unique_edges[unique_edges > 0] = 1
    unique_edges[unique_edges == 0] = 0
    fluct = (np.sum(unique_edges)) / np.sum(netin)
    return fluct
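A minimal worked illustration of the formula above (unique edges divided by the total number of edge-presences), computed directly with numpy on a toy graphlet array rather than by calling the function; the array and values are illustrative only.

>>> import numpy as np
>>> G = np.zeros([3, 3, 3])
>>> G[0, 1, [0, 1, 2]] = 1   # edge (0,1) present at all three time points
>>> G[1, 2, 0] = 1           # edge (1,2) present once
>>> unique_edges = np.sum((G != 0).sum(axis=2) > 0)   # 2 unique edges
>>> total_edges = np.sum(G != 0)                      # 4 edge-presences
>>> print(unique_edges / total_edges)
0.5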
def sid(tnet, communities, axis=0, calc='overtime', decay=0):
    r"""
    Segregation integration difference (SID). An estimate, for each community or globally,
    of the difference between within-community and between-community strength.[sid-1]_

    Parameters
    ----------

    tnet: array, dict
        Temporal network input (graphlet or contact). Allowed nettype: 'bu', 'bd', 'wu', 'wd'

    communities : array
        a Nx1 vector or NxT array of community assignment.

    axis : int
        Dimension that is returned 0 or 1 (default 0).
        Note, only relevant for directed networks.
        i.e. if 0, node i has Aijt summed over j and t.
        and if 1, node j has Aijt summed over i and t.

    calc : str
        'overtime' returns the SID over time (a 1 x time vector) (default);

        'community_pairs' returns a community x community x time array, which is the SID for each community pairing;

        'community_avg' returns a community x time matrix, which is the normalized average of each community to all other communities;

        'community_pairs_norm' returns a community x community x time array, where each pair is normalized by the average of the two communities in the pair.

    decay: int
        Decay applied when computing the temporal degree centrality: the centrality of the previous
        time point is carried over to the next time point but decays at a rate of $e^{-decay}$,
        such that the temporal centrality measure becomes $D(t+1) = e^{-decay}D(t) + D(t+1)$.

    Returns
    -------

    sid: array
        segregation-integration difference. Format: 2d or 3d numpy array (depending on calc)
        representing (community,community,time) or (community,time)

    Notes
    ------

    SID tries to quantify whether there is more segregation or integration compared to other time-points.
    If SID > 0, then there is more segregation than usual. If SID < 0, then there is more integration than usual.

    There are different variants of SID: a global measure (calc='overtime'), a value per community
    (calc='community_avg'), and a value for each community-community pairing (calc='community_pairs',
    with calc='community_pairs_norm' as a pair-normalized variant).

    First we calculate the temporal strength for each edge. This is calculated by:

    .. math:: S_{i,t} = \sum_j G_{i,j,t}

    The pairwise SID, when the network is undirected, is calculated by:

    .. math:: SID_{A,B,t} = ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A N_B}}) S_{A,B,t}

    Where :math:`S_{A,t}` is the average temporal strength at time-point t for community A.
    :math:`N_A` is the number of nodes in community A.

    When calculating the SID for a community, it is calculated by:

    .. math:: SID_{A,t} = \sum_b^C ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A N_b}}) S_{A,b,t}

    Where C is the number of communities.

    When calculating the SID globally, it is calculated by:

    .. math:: SID_{t} = \sum_a^C \sum_b^C ({2 \over {N_a (N_a - 1)}}) S_{a,t} - ({{1} \over {N_a N_b}}) S_{a,b,t}

    References
    -----------

    .. [sid-1] Fransson et al (2018) Brain network segregation and integration during an epoch-related
       working memory fMRI experiment. Neuroimage. 178.
       [`Link <https://www.sciencedirect.com/science/article/pii/S1053811918304476>`_]

    """
    tnet, netinfo = utils.process_input(tnet, ['C', 'G', 'TN'])
    D = temporal_degree_centrality(
        tnet, calc='pertime', communities=communities, decay=decay)

    # Check network input (order of communities)
    network_ids = np.unique(communities)
    communities_size = np.array([sum(communities == n) for n in network_ids])

    sid = np.zeros([network_ids.max()+1, network_ids.max()+1, tnet.shape[-1]])
    for n in network_ids:
        for m in network_ids:
            betweenmodulescaling = 1/(communities_size[n]*communities_size[m])
            if netinfo['nettype'][1] == 'd':
                withinmodulescaling = 1 / \
                    (communities_size[n]*communities_size[n])
                withinmodulescaling_m = 1 / \
                    (communities_size[m]*communities_size[m])
            elif netinfo['nettype'][1] == 'u':
                withinmodulescaling = 2 / \
                    (communities_size[n]*(communities_size[n]-1))
                withinmodulescaling_m = 2 / \
                    (communities_size[m]*(communities_size[m]-1))
            if n == m:
                betweenmodulescaling = withinmodulescaling
            if calc == 'community_pairs_norm':
                # Normalize by the average of the within-community terms of n and m
                sid[n, m, :] = ((withinmodulescaling * D[n, n, :]) +
                                (withinmodulescaling_m * D[m, m, :])) / 2 \
                    - betweenmodulescaling * D[n, m, :]
            else:
                sid[n, m, :] = withinmodulescaling * \
                    D[n, n, :] - betweenmodulescaling * D[n, m, :]

    # If nans emerge, there is no connection between the communities at that time point, so make these 0.
    sid[np.isnan(sid)] = 0

    if calc == 'community_avg':
        return np.sum(sid, axis=axis)
    elif calc == 'overtime':
        return np.sum(np.sum(sid, axis=1), axis=0)
    else:
        return sid
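A small numeric sketch of the pairwise undirected formula above, using assumed toy values for the community sizes and strengths (these numbers are illustrative and are not produced by the function):

>>> N_A, N_B = 3, 2
>>> S_A, S_AB = 2.0, 1.0   # within-community-A strength and A-B between strength at one time point
>>> sid_ab = (2 / (N_A * (N_A - 1))) * S_A - (1 / (N_A * N_B)) * S_AB
>>> print(round(sid_ab, 2))
0.5

A positive value indicates more segregation than integration for this community pair at that time point.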
def temporal_participation_coeff(tnet, communities=None, decay=None, removeneg=False): r""" Calculates the temporal participation coefficient Temporal participation coefficient is a measure of diversity of connections across communities for individual nodes. Parameters ---------- tnet : array, dict graphlet or contact sequence input. Only positive matrices considered. communities : array community vector. Either 1D (node) community index or 2D (node,time). removeneg : bool (default false) If true, all values < 0 are made to be 0. Returns ------- P : array participation coefficient Notes ----- Static participatoin coefficient is: .. math:: P_i = 1 - \sum_s^{N_M}({{k_{is}}\over{k_i}})^2 Where s is the index of each community (:math:`N_M`). :math:`k_i` is total degree of node. And :math:`k_{is}` is degree of connections within community.[part-1]_ This "temporal" version only loops through temporal snapshots and calculates :math:`P_i` for each t. If directed, function sums axis=1, so tnet may need to be transposed before hand depending on what type of directed part_coef you are interested in. References ---------- .. [part-1] Guimera et al (2005) Functional cartography of complex metabolic networks. Nature. 433: 7028, p895-900. [`Link <http://doi.org/10.1038/nature03288>`_] """ if communities is None: if isinstance(tnet, dict): if 'communities' in tnet.keys(): communities = tnet['communities'] else: raise ValueError('Community index not found') else: raise ValueError('Community must be provided for graphlet input') # Get input in right format tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN') if tnet.nettype[0] == 'w': # TODO add contingency when hdf5 data has negative edges if not tnet.hdf5 and tnet.sparse: if sum(tnet.network['weight'] < 0) > 0 and not removeneg: print( 'TENETO WARNING: negative edges exist when calculating participation coefficient.' ) else: tnet.network['weight'][tnet.network['weight'] < 0] = 0 if not tnet.hdf5 and not tnet.sparse: if np.sum(tnet.network < 0) > 0 and not removeneg: print( 'TENETO WARNING: negative edges exist when calculating participation coefficient.' 
) else: tnet.network[tnet.network < 0] = 0 part = np.zeros([tnet.netshape[0], tnet.netshape[1]]) if len(communities.shape) == 1: for t in np.arange(0, tnet.netshape[1]): C = communities snapshot = tnet.get_network_when(t=t) if tnet.nettype[1] == 'd': i_at_t = snapshot['i'].values else: i_at_t = np.concatenate( [snapshot['i'].values, snapshot['j'].values]) i_at_t = np.unique(i_at_t).tolist() i_at_t = list(map(int, i_at_t)) for i in i_at_t: # Calculate degree of node if tnet.nettype[1] == 'd': df = tnet.get_network_when(i=i, t=t) j_at_t = df['j'].values if tnet.nettype == 'wd': k_i = df['weight'].sum() elif tnet.nettype == 'bd': k_i = len(df) elif tnet.nettype[1] == 'u': df = tnet.get_network_when(ij=i, t=t) j_at_t = np.concatenate([df['i'].values, df['j'].values]) if tnet.nettype == 'wu': k_i = df['weight'].sum() elif tnet.nettype == 'bu': k_i = len(df) j_at_t = list(map(int, j_at_t)) for c in np.unique(C[j_at_t]): ci = np.where(C == c)[0].tolist() k_is = tnet.get_network_when(i=i, j=ci, t=t) if tnet.nettype[1] == 'u' and tnet.sparse: k_is2 = tnet.get_network_when(j=i, i=ci, t=t) k_is = pd.concat([k_is, k_is2]) if len(k_is) > 0: if tnet.nettype[0] == 'b': k_is = len(k_is) else: k_is = k_is['weight'].sum() part[i, t] += np.square(k_is / k_i) part[i_at_t, t] = 1 - part[i_at_t, t] print(part) if decay is not None and t > 0: part[i_at_t, t] += decay * part[i_at_t, t - 1] else: for t in np.arange(0, tnet.netshape[1]): snapshot = tnet.get_network_when(t=t) if tnet.nettype[1] == 'd': i_at_t = snapshot['i'].values else: i_at_t = np.concatenate( [snapshot['i'].values, snapshot['j'].values]) i_at_t = np.unique(i_at_t).tolist() i_at_t = list(map(int, i_at_t)) for i in i_at_t: for tc in np.arange(0, tnet.netshape[1]): C = communities[:, tc] # Calculate degree of node if tnet.nettype[1] == 'd': df = tnet.get_network_when(i=i, t=t) j_at_t = df['j'].values if tnet.nettype[0] == 'w': k_i = df['weight'].sum() elif tnet.nettype[0] == 'b': k_i = len(df) elif tnet.nettype[1] == 'u': df = tnet.get_network_when(ij=i, t=t) j_at_t = np.concatenate( [df['i'].values, df['j'].values]) if tnet.nettype == 'wu': k_i = df['weight'].sum() elif tnet.nettype == 'bu': k_i = len(df) j_at_t = list(map(int, j_at_t)) for c in np.unique(C[j_at_t]): ci = np.where(C == c)[0].tolist() k_is = tnet.get_network_when(i=i, j=ci, t=t) if tnet.nettype[1] == 'u' and tnet.sparse: k_is2 = tnet.get_network_when(j=i, i=ci, t=t) k_is = pd.concat([k_is, k_is2]) if tnet.nettype[0] == 'b': k_is = len(k_is) else: k_is = k_is['weight'].sum() part[i, t] += np.square(k_is / k_i) part[i, t] = part[i, t] / tnet.netshape[1] part[i_at_t, t] = 1 - part[i_at_t, t] if decay is not None and t > 0: part[i_at_t, t] += decay * part[i_at_t, t - 1] # Set any division by 0 to 0 part[np.isnan(part) == 1] = 0 return part
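The per-snapshot computation in the function above can be illustrated with a dense toy matrix. This sketch applies the static participation coefficient formula directly with numpy (it does not reproduce the sparse bookkeeping used by get_network_when):

>>> import numpy as np
>>> A = np.array([[0, 1, 1, 0],
...               [1, 0, 0, 1],
...               [1, 0, 0, 1],
...               [0, 1, 1, 0]], dtype=float)   # one undirected snapshot
>>> C = np.array([0, 0, 1, 1])                  # two communities
>>> k = A.sum(axis=1)                           # degree k_i
>>> k_s = np.stack([A[:, C == c].sum(axis=1) for c in np.unique(C)], axis=1)
>>> print(1 - np.sum((k_s / k[:, None]) ** 2, axis=1))
[0.5 0.5 0.5 0.5]

Each node splits its two edges evenly across the two communities, so every node gets a participation coefficient of 0.5.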
def temporal_part_coef(net, communities=None, removeneg=False): ''' Temporal participation coefficient is a measure of diversity of connections across communities for individual nodes. Static participatoin coefficient is: $P_i = 1 - sum_s^{N_M}({(k_is)/k_i}^2)$ Where s is the index of each community (N_M). k_i is total degree of node. And k_is is degree of connections within community. This "temporal" version only loops through temporal snapshots and calculates P_i for each t. Parameters ---------- net : array, dict graphlet or contact sequence input. Only positive matrices considered. communities : array community vector. Either 1D (node) community index or 2D (node,time). removeneg : bool (default false) If true, all values < 0 are made to be 0. Note ---- If directed, function sums axis=1, so G may want to be transposed before hand depending on what type of directed part_coef you are interested in. Note ---- Adding negative connections is easy possible addition. Returns ------- P : array participation coefficient Source ------ Guimera et al (2005) Functional cartography of complex metabolic networks. Nature. ''' if communities is None: if isinstance(net, dict): if 'communities' in net.keys(): communities = net['communities'] else: raise ValueError('Community index not found') else: raise ValueError('Community must be provided for graphlet input') # Get input in right format net, netinfo = utils.process_input(net, ['C', 'G', 'TO']) if np.sum(net < 0) > 0 and not removeneg: raise ValueError('Negative connections found') if removeneg: net[net < 0] = 0 k_is = np.zeros([netinfo['netshape'][0], netinfo['netshape'][2]]) part = np.ones([netinfo['netshape'][0], netinfo['netshape'][2]]) for t in np.arange(0, netinfo['netshape'][2]): if len(communities.shape) == 2: C = communities[:, t] else: C = communities for i in np.unique(C): k_is[:, t] += np.square(np.sum(net[:, C == i, t], axis=1)) part = part - (k_is / np.square(np.sum(net, axis=1))) # Set any division by 0 to 0 part[np.isnan(part) == 1] = 0 return part
def temporal_participation_coeff(tnet, communities=None, decay=None, removeneg=False): r''' Temporal participation coefficient is a measure of diversity of connections across communities for individual nodes. Parameters ---------- tnet : array, dict graphlet or contact sequence input. Only positive matrices considered. communities : array community vector. Either 1D (node) community index or 2D (node,time). removeneg : bool (default false) If true, all values < 0 are made to be 0. Returns ------- P : array participation coefficient Notes ----- Static participatoin coefficient is: .. math:: P_i = 1 - \sum_s^{N_M}({{k_{is}}\over{k_i}})^2 Where s is the index of each community (:math:`N_M`). :math:`k_i` is total degree of node. And :math:`k_{is}` is degree of connections within community.[part-1]_ This "temporal" version only loops through temporal snapshots and calculates :math:`P_i` for each t. If directed, function sums axis=1, so tnet may need to be transposed before hand depending on what type of directed part_coef you are interested in. References ---------- .. [part-1] Guimera et al (2005) Functional cartography of complex metabolic networks. Nature. 433: 7028, p895-900. [`Link <http://doi.org/10.1038/nature03288>`_] ''' if communities is None: if isinstance(tnet, dict): if 'communities' in tnet.keys(): communities = tnet['communities'] else: raise ValueError('Community index not found') else: raise ValueError('Community must be provided for graphlet input') # Get input in right format tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN') if tnet.nettype[0] == 'w': # TODO add contingency when hdf5 data has negative edges if tnet.hdf5 == False: if sum(tnet.network['weight'] < 0) > 0 and not removeneg: print( 'TENETO WARNING: negative edges exist when calculating participation coefficient.') else: tnet.network['weight'][tnet.network['weight'] < 0] = 0 part = np.zeros([tnet.netshape[0], tnet.netshape[1]]) if len(communities.shape) == 1: for t in np.arange(0, tnet.netshape[1]): C = communities snapshot = tnet.get_network_when(t=t) if tnet.nettype[1] == 'd': i_at_t = snapshot['i'].values else: i_at_t = np.concatenate( [snapshot['i'].values, snapshot['j'].values]) i_at_t = np.unique(i_at_t).tolist() i_at_t = list(map(int, i_at_t)) for i in i_at_t: # Calculate degree of node if tnet.nettype[1] == 'd': df = tnet.get_network_when(i=i, t=t) j_at_t = df['j'].values if tnet.nettype == 'wd': k_i = df['weight'].sum() elif tnet.nettype == 'bd': k_i = len(df) elif tnet.nettype[1] == 'u': df = tnet.get_network_when(ij=i, t=t) j_at_t = np.concatenate([df['i'].values, df['j'].values]) if tnet.nettype == 'wu': k_i = df['weight'].sum() elif tnet.nettype == 'bu': k_i = len(df) j_at_t = list(map(int, j_at_t)) for c in np.unique(C[j_at_t]): ci = np.where(C == c)[0].tolist() k_is = tnet.get_network_when(i=i, j=ci, t=t) if tnet.nettype[1] == 'u': k_is2 = tnet.get_network_when(j=i, i=ci, t=t) k_is = pd.concat([k_is, k_is2]) if len(k_is) > 0: if tnet.nettype[0] == 'b': k_is = len(k_is) else: k_is = k_is['weight'].sum() part[i, t] += np.square(k_is/k_i) part[i_at_t, t] = 1 - part[i_at_t, t] if decay is not None and t > 0: part[i_at_t, t] += decay*part[i_at_t, t-1] else: for t in np.arange(0, tnet.netshape[1]): snapshot = tnet.get_network_when(t=t) if tnet.nettype[1] == 'd': i_at_t = snapshot['i'].values else: i_at_t = np.concatenate( [snapshot['i'].values, snapshot['j'].values]) i_at_t = np.unique(i_at_t).tolist() i_at_t = list(map(int, i_at_t)) for i in i_at_t: for tc in np.arange(0, tnet.netshape[1]): C = 
communities[:, tc] # Calculate degree of node if tnet.nettype[1] == 'd': df = tnet.get_network_when(i=i, t=t) j_at_t = df['j'].values if tnet.nettype == 'wd': k_i = df['weight'].sum() elif tnet.nettype == 'bd': k_i = len(df) elif tnet.nettype[1] == 'u': df = tnet.get_network_when(ij=i, t=t) j_at_t = np.concatenate( [df['i'].values, df['j'].values]) if tnet.nettype == 'wu': k_i = df['weight'].sum() elif tnet.nettype == 'bu': k_i = len(df) j_at_t = list(map(int, j_at_t)) for c in np.unique(C[j_at_t]): ci = np.where(C == c)[0].tolist() k_is = tnet.get_network_when(i=i, j=ci, t=t) if tnet.nettype[1] == 'u': k_is2 = tnet.get_network_when(j=i, i=ci, t=t) k_is = pd.concat([k_is, k_is2]) if tnet.nettype[0] == 'b': k_is = len(k_is) else: k_is = k_is['weight'].sum() part[i, t] += np.square(k_is/k_i) part[i, t] = part[i, t] / tnet.netshape[1] part[i_at_t, t] = 1 - part[i_at_t, t] if decay is not None and t > 0: part[i_at_t, t] += decay*part[i_at_t, t-1] # Set any division by 0 to 0 part[np.isnan(part) == 1] = 0 return part
def shortest_temporal_path(tnet, steps_per_t='all', i=None, j=None, it=None, minimise='temporal_distance'): """ Shortest temporal path Parameters -------------- tnet : tnet obj, array or dict input network. nettype: bu, bd. steps_per_t : int or str If str, should be 'all'. How many edges can be travelled during a single time-point. i : list List of node indicies to restrict analysis. These are nodes the paths start from. Default is all nodes. j : list List of node indicies to restrict analysis. There are nodes the paths end on. Default is all nodes. it : None, int, list Time points for parts. Either None (default) which takes all time points, an integer to indicate which time point to start at, or a list of time-points that is included in analysis (including end time-point). minimise : str Can be "temporal_distance", returns the path that has the smallest temporal distance. It is possible there can be a path that is a smaller topological distance (this option currently not available). Returns ------------------- paths : pandas df Dataframe consisting of information about all the paths found. Notes --------------- The shortest temporal path calculates the temporal and topological distance there to be a path between nodes. The argument steps_per_t allows for multiple nodes to be travelled per time-point. Topological distance is the number of edges that are travelled. Temporal distance is the number of time-points. This function returns the path that is the shortest temporal distance away. Examples -------- Let us start by creating a small network. >>> import numpy as np >>> import matplotlib.pyplot as plt >>> import teneto >>> G = np.zeros([4, 4, 3]) >>> G[0, 1, [0, 2]] = 1 >>> G[0, 3, [2]] = 1 >>> G[1, 2, [1]] = 1 >>> G[2, 3, [1]] = 1 Let us look at this network to see what is there. >>> fig, ax = plt.subplots(1) >>> ax = teneto.plot.slice_plot(G, ax, nodelabels=[0,1,2,3], timelabels=[0,1,2], cmap='Set2') >>> plt.tight_layout() >>> fig.show() .. plot:: import numpy as np import matplotlib.pyplot as plt import teneto G = np.zeros([4, 4, 3]) G[0, 1, [0, 2]] = 1 G[0, 3, [2]] = 1 G[1, 2, [1]] = 1 G[2, 3, [1]] = 1 fig,ax = plt.subplots(1) teneto.plot.slice_plot(G,ax,nodelabels=[0,1,2,3],timelabels=[0,1,2],cmap='Set2') plt.tight_layout() fig.show() Here we can visualize what the shortest paths are. Let us start by starting at node 0 we want to find the path to node 3, starting at time 0. To do this we write: >>> sp = teneto.networkmeasures.shortest_temporal_path(G, i=0, j=3, it=0) >>> sp['temporal-distance'] 0 2 Name: temporal-distance, dtype: int64 >>> sp['topological-distance'] 0 3 Name: topological-distance, dtype: int64 >>> sp['path includes'] 0 [[0, 1], [1, 2], [2, 3]] Name: path includes, dtype: object Here we see that the shortest path takes 3 steps (topological distance of 3) at 2 time points. It starts by going from node 0 to 1 at t=0, then 1 to 2 and 2 to 3 at t=1. We can see all the nodes that were travelled in the "path includes" list. In the above example, it was possible to traverse multiple edges at a single time-point. It is possible to restrain that by setting the steps_per_t argument >>> sp = teneto.networkmeasures.shortest_temporal_path(G, i=0, j=3, it=0, steps_per_t=1) >>> sp['temporal-distance'] 0 3 Name: temporal-distance, dtype: int64 >>> sp['topological-distance'] 0 1 Name: topological-distance, dtype: int64 >>> sp['path includes'] 0 [[0, 3]] Name: path includes, dtype: object Here we see that the path is now only one edge, 0 to 3 at t=2. 
The quicker path is no longer possible. """ tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN') # If i, j or it are inputs, process them if i is None: source_nodes = np.arange(tnet.netshape[0]) elif isinstance(i, int): source_nodes = [i] elif isinstance(i, list): source_nodes = i else: raise ValueError('Unknown i input. Should be None, int or list') if j is None: target_nodes = np.arange(tnet.netshape[0]) elif isinstance(j, int): target_nodes = [j] elif isinstance(j, list): target_nodes = j else: raise ValueError('Unknown j input. Should be None, int or list') if it is None: time_points = np.arange(tnet.netshape[1]) elif isinstance(it, int): time_points = [it] elif isinstance(it, list): time_points = it else: raise ValueError('Unknown t input. Should be None, int or list') # Two step process. # First, get what the network can reach per timepoint. # Second, check all possible sequences of what the network can reach for the shortest sequence. paths = [] for source in source_nodes: for target in target_nodes: if target == source: pass else: for tstart in time_points: # Part 1 starts here ij = [source] t = tstart step = 1 lenij = 1 pairs = [] stop = 0 while stop == 0: # Only select i if directed, ij if undirected. if tnet.nettype[1] == 'u': network = tnet.get_network_when(ij=list(ij), t=t) elif tnet.nettype[1] == 'd': network = tnet.get_network_when(i=list(ij), t=t) new_nodes = network[['i', 'j']].values if len(new_nodes) != 0: pairs.append(new_nodes.tolist()) new_nodes = new_nodes.flatten() ij = np.hstack([ij, new_nodes]) ij = np.unique(ij) if minimise == 'temporal_distance' and target in ij: stop = 1 elif minimise == 'topology' and t == tnet.netshape[ 1] and target in ij: stop = 1 elif t == tnet.netshape[1]: t = np.nan ij = [target] stop = 1 else: if len(ij) == lenij: t += 1 step = 1 elif steps_per_t == 'all': pass elif step < steps_per_t: step += 1 else: t += 1 step = 1 if t == tnet.netshape[1]: t = np.nan ij = [target] stop = 1 lenij = len(ij) # correct t for return # Only run if one pair is added. t += 1 # part 2 starts here path = np.nan path_length = np.nan for n in itertools.product(*reversed(pairs)): a = np.array(n).flatten() if source not in a or target not in a: pass else: pathtmp = shortest_path_from_pairseq(a, source) if pathtmp: if not isinstance(path, list): path = pathtmp path_length = len(path) elif len(pathtmp) < path_length: path = pathtmp path_length = len(path) elif len(pathtmp) == path_length: if isinstance(path[0][0], list): if pathtmp in path: pass else: path.append(pathtmp) else: if path == pathtmp: pass else: path = [path, pathtmp] # elif sourcei < 2 and target in a[:2]: # path_length = 2 paths.append([ source, target, tstart, t - tstart, path_length, path ]) paths = pd.DataFrame(data=paths, columns=[ 'from', 'to', 't_start', 'temporal-distance', 'topological-distance', 'path includes' ]) return paths
def temporal_louvain(tnet, resolution=1, intersliceweight=1, n_iter=100, negativeedge='ignore',
                     randomseed=None, consensus_threshold=0.5, temporal_consensus=True, njobs=1):
    r"""
    Louvain clustering for a temporal network.

    Parameters
    -----------
    tnet : array, dict, TemporalNetwork
        Input network
    resolution : int
        resolution of Louvain clustering ($\gamma$)
    intersliceweight : int
        interslice weight of multilayer clustering ($\omega$). Must be positive.
    n_iter : int
        Number of iterations to run Louvain for
    randomseed : int
        Set for reproducibility
    negativeedge : str
        If there are negative edges, what should be done with them.
        Options: 'ignore' (i.e. set to 0). More options to be added.
    consensus_threshold : float (0.5 default)
        When creating the consensus matrix to average over the iterations, keep values where the
        consensus is at least this amount.
    temporal_consensus : bool
        If True, apply a temporal consensus step so that community labels are consistent across time-points.
    njobs : int
        Number of parallel jobs used to run the Louvain iterations.

    Returns
    -------
    communities : array (node,time)
        node,time array of community assignment

    """
    tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN')
    # Divide resolution by the number of timepoints
    #resolution = resolution / tnet.T
    supranet = create_supraadjacency_matrix(
        tnet, intersliceweight=intersliceweight)
    if negativeedge == 'ignore':
        supranet = supranet[supranet['weight'] > 0]
    nxsupra = tnet_to_nx(supranet)
    np.random.seed(randomseed)
    while True:
        comtmp = []
        if njobs > 1:
            with ProcessPoolExecutor(max_workers=njobs) as executor:
                job = {executor.submit(_run_louvain, nxsupra, resolution, tnet.N, tnet.T)
                       for n in range(n_iter)}
                for j in as_completed(job):
                    comtmp.append(j.result())
            comtmp = np.stack(comtmp)
        else:
            comtmp = np.array([_run_louvain(nxsupra, resolution, tnet.N, tnet.T)
                               for n in range(n_iter)])
            comtmp = np.stack(comtmp)
        comtmp = comtmp.transpose()
        comtmp = np.reshape(comtmp, [tnet.N, tnet.T, n_iter], order='F')
        # if n_iter == 1:
        #     break
        nxsupra_old = nxsupra
        nxsupra = make_consensus_matrix(comtmp, consensus_threshold)
        # If there was no consensus, no communities are possible, so return
        if nxsupra is None:
            break
        if (nx.to_numpy_array(nxsupra, nodelist=np.arange(tnet.N * tnet.T)) ==
                nx.to_numpy_array(nxsupra_old, nodelist=np.arange(tnet.N * tnet.T))).all():
            break
    communities = comtmp[:, :, 0]
    if temporal_consensus:
        communities = make_temporal_consensus(communities)
    return communities
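The consensus step between Louvain runs can be pictured as a thresholded co-assignment matrix. Below is a standalone numpy sketch of that idea; make_consensus_matrix in this module operates on the supra-adjacency representation, so the shapes and names here are purely illustrative:

>>> import numpy as np
>>> partitions = np.array([[0, 0, 1, 1],
...                        [0, 0, 1, 1],
...                        [0, 1, 1, 1]])   # n_iter x n_nodes community labels
>>> consensus = np.mean([p[:, None] == p[None, :] for p in partitions], axis=0)
>>> consensus[consensus < 0.5] = 0          # consensus_threshold
>>> print(round(float(consensus[0, 1]), 2), float(consensus[1, 2]))
0.67 0.0

Nodes 0 and 1 are co-assigned in two of the three runs (0.67, kept), while nodes 1 and 2 only agree once (0.33, set to 0).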
def temporal_degree_centrality(net, axis=0, calc='avg', communities=None, subnet=None,
                               decay=None, ignorediagonal=True):
    """
    Temporal degree of network. Sum of all connections each node has through time.

    **PARAMETERS**

    :net: temporal network input (graphlet or contact).

        :nettype: 'bu', 'bd', 'wu', 'wd'

    :axis: Dimension that is returned 0 or 1 (default 0).
        Note, only relevant for directed networks.
        i.e. if 0, node i has Aijt summed over j and t.
        and if 1, node j has Aijt summed over i and t.

    :calc: options: 'avg', 'time', 'module_degree_zscore'

        'avg' returns temporal degree centrality (a 1 x node vector);

        'time' returns a node x time matrix, i.e. static degree centrality per time-point;

        'module_degree_zscore' returns the z-scored within-community degree centrality
        (communities argument required). This is done for each time-point.

    :ignorediagonal: if true, diagonal is made to 0.

    :communities: array (Nx1) vector of community assignment.
        If this is given and calc='time', then the strength within and between each
        community is returned (technically not degree centrality).

    :subnet: array (Nx1) vector of community assignment (deprecated).

    :decay: if calc='time', the centrality of the previous time point is carried over to the
        next time point but decays at a rate of $e^{-decay}$, such that
        $D_d(t+1) = e^{-decay}D_d(t) + D(t+1)$. If decay is 0 then the final D will equal
        D when calc='avg'; if decay = inf then this will equal calc='time'.

    **OUTPUT**

    D : array
        temporal degree centrality (nodal measure). Array is 1D ('avg'),
        2D ('time', 'module_degree_zscore') or 3D ('time' + communities
        (non-nodal/community measures)).

    **SEE ALSO**

    - *temporal_closeness_centrality*
    """
    if subnet is not None:
        warnings.warn(
            "Subnet argument will be removed in v0.3.5. Use communities instead.", FutureWarning)
        communities = subnet
    # Get input in right format
    net, netinfo = utils.process_input(net, ['C', 'G', 'TO'])
    if ignorediagonal:
        net = utils.set_diagonal(net, 0)
    # Sum over the network
    if calc == 'time' and communities is None:
        tdeg = np.squeeze(np.sum(net, axis=axis))
    elif calc == 'module_degree_zscore' and communities is None:
        raise ValueError(
            'Communities must be specified when calculating module degree z-score.')
    elif calc != 'time' and communities is None:
        tdeg = np.sum(np.sum(net, axis=2), axis=axis)
    elif calc == 'module_degree_zscore' and communities is not None:
        tdeg = np.zeros([net.shape[0], net.shape[2]])
        for t in range(net.shape[2]):
            if len(communities.shape) == 2:
                C = communities[:, t]
            else:
                C = communities
            for c in np.unique(C):
                k_i = np.sum(net[:, C == c, t][C == c], axis=axis)
                tdeg[C == c, t] = (k_i - np.mean(k_i)) / np.std(k_i)
        tdeg[np.isnan(tdeg) == 1] = 0
    elif calc == 'time' and communities is not None:
        tdeg_communities = np.zeros(
            [communities.max() + 1, communities.max() + 1, communities.shape[-1]])
        if len(communities.shape) == 2:
            for t in range(len(communities[-1])):
                C = communities[:, t]
                unique_communities = np.unique(C)
                for s1 in unique_communities:
                    for s2 in unique_communities:
                        tdeg_communities[s1, s2, t] = np.sum(
                            np.sum(net[C == s1, :, t][:, C == s2], axis=1), axis=0)
        else:
            unique_communities = np.unique(communities)
            tdeg_communities = [np.sum(np.sum(net[communities == s1, :, :][:, communities == s2, :], axis=1), axis=0)
                                for s1 in unique_communities
                                for s2 in unique_communities]
        tdeg = np.array(tdeg_communities)
        tdeg = np.reshape(tdeg, [len(np.unique(communities)),
                                 len(np.unique(communities)), net.shape[-1]])
        # Divide diagonal by 2 if undirected to correct for edges being counted twice
        if netinfo['nettype'][1] == 'u':
            for s in range(tdeg.shape[0]):
                tdeg[s, s, :] = tdeg[s, s, :] / 2
    else:
        raise ValueError("Invalid calc argument")

    if decay and calc == 'time':
        # Reshape so that time is the first dimension
        tdeg = tdeg.transpose(
            np.hstack([len(tdeg.shape) - 1, np.arange(len(tdeg.shape) - 1)]))
        for n in range(1, tdeg.shape[0]):
            tdeg[n] = np.exp(-decay) * tdeg[n - 1] + tdeg[n]
        tdeg = tdeg.transpose(np.hstack([np.arange(1, len(tdeg.shape)), 0]))
    elif decay:
        print('WARNING: decay cannot be applied unless calc=time, ignoring decay')

    return tdeg
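A short sketch of the decay recursion described above for calc='time', applied to a toy node x time degree matrix (values are illustrative):

>>> import numpy as np
>>> decay = 0.5
>>> D = np.array([[1., 0., 2.],
...               [0., 3., 1.]])   # node x time degree
>>> D_d = D.copy()
>>> for t in range(1, D.shape[1]):
...     D_d[:, t] = np.exp(-decay) * D_d[:, t - 1] + D[:, t]   # D_d(t) = e^{-decay} D_d(t-1) + D(t)
>>> print(round(float(D_d[0, 2]), 2), round(float(D_d[1, 2]), 2))
2.37 2.82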
def shortest_temporal_path(tnet, steps_per_t='all', i=None, j=None, it=None, minimise='time'): """ Shortest temporal path Parameters -------------- tnet : tnet obj, array or dict input network. nettype: bu, bd. steps_per_t : int or str If str, should be 'all'. How many edges can be travelled during a single time-point. i : list List of node indicies to restrict analysis. These are nodes the paths start from. Default is all nodes. j : list List of node indicies to restrict analysis. There are nodes the paths end on. Default is all nodes. it : list List of starting time-point indicies to restrict anlaysis. Default is all timepoints. Returns paths : pandas df Dataframe consisting of information about all the paths found. """ tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN') # If i, j or it are inputs, process them if i is None: source_nodes = np.arange(tnet.netshape[0]) elif isinstance(i, int): source_nodes = [i] elif isinstance(i, list): source_nodes = i else: raise ValueError('Unknown i input. Should be None, int or list') if j is None: target_nodes = np.arange(tnet.netshape[0]) elif isinstance(j, int): target_nodes = [j] elif isinstance(j, list): target_nodes = j else: raise ValueError('Unknown j input. Should be None, int or list') if it is None: time_points = np.arange(tnet.netshape[1]) elif isinstance(it, int): time_points = [it] elif isinstance(it, list): time_points = it else: raise ValueError('Unknown t input. Should be None, int or list') # Two step process. # First, get what the network can reach per timepoint. # Second, check all possible sequences of what the network can reach for the shortest sequence. paths = [] for source in source_nodes: for target in target_nodes: if target == source: pass else: for tstart in time_points: # Part 1 starts here ij = [source] t = tstart step = 1 lenij = 1 pairs = [] stop = 0 while stop == 0: # Only select i if directed, ij if undirected. if tnet.nettype[1] == 'u': network = tnet.get_network_when(ij=list(ij), t=t) elif tnet.nettype[1] == 'd': network = tnet.get_network_when(i=list(ij), t=t) new_nodes = network[['i', 'j']].values if len(new_nodes) != 0: pairs.append(new_nodes.tolist()) new_nodes = new_nodes.flatten() ij = np.hstack([ij, new_nodes]) ij = np.unique(ij) if minimise == 'time' and target in ij: stop = 1 elif minimise == 'topology' and t == tnet.netshape[ 1] and target in ij: stop = 1 elif t == tnet.netshape[1]: t = np.nan ij = [target] stop = 1 else: if len(ij) == lenij: t += 1 step = 1 elif steps_per_t == 'all': pass elif step < steps_per_t: step += 1 else: t += 1 step = 1 lenij = len(ij) # correct t for return t += 1 # Path 2 starts here path = np.nan pl = np.nan for n in itertools.product(*reversed(pairs)): a = np.array(n).flatten() if source not in a or target not in a: pass else: pathtmp = shortest_path_from_pairseq(a, source) if pathtmp: if not isinstance(path, list): path = pathtmp pl = len(path) elif len(pathtmp) < pl: path = pathtmp pl = len(path) elif len(pathtmp) == pl: if isinstance(path[0][0], list): if pathtmp in path: pass else: path.append(pathtmp) else: if path == pathtmp: pass else: path = [path, pathtmp] #elif sourcei < 2 and target in a[:2]: # pl = 2 paths.append( [source, target, tstart, t - tstart, pl, path]) paths = pd.DataFrame(data=paths, columns=[ 'from', 'to', 't_start', 'temporal-distance', 'topological-distance', 'path includes' ]) return paths
def shortest_temporal_path(tnet, steps_per_t='all', i=None, j=None, it=None, minimise='time'): """ Shortest temporal path Parameters -------------- tnet : tnet obj, array or dict input network. nettype: bu, bd. steps_per_t : int or str If str, should be 'all'. How many edges can be travelled during a single time-point. i : list List of node indicies to restrict analysis. These are nodes the paths start from. Default is all nodes. j : list List of node indicies to restrict analysis. There are nodes the paths end on. Default is all nodes. it : list List of starting time-point indicies to restrict anlaysis. Default is all timepoints. minimise : str Can be "time", returns the path that has the smallest temporal distance. It is possible there can be a path that is a smaller topological distance (this option currently not available). Returns ------------------- paths : pandas df Dataframe consisting of information about all the paths found. Notes --------------- The shortest temporal path calculates the temporal and topological distance there to be a path between nodes. The argument steps_per_t allows for multiple nodes to be travelled per time-point. Topological distance is the number of edges that are travelled. Temporal distance is the number of time-points. This function returns the path that is the shortest temporal distance away. Examples -------- Let us start by creating a small network. >>> import numpy as np >>> import matplotlib.pyplot as plt >>> import teneto >>> G = np.zeros([4, 4, 3]) >>> G[0, 1, [0, 2]] = 1 >>> G[0, 3, [2]] = 1 >>> G[1, 2, [1]] = 1 >>> G[2, 3, [1]] = 1 Let us look at this network to see what is there. >>> fig, ax = plt.subplots(1) >>> ax = teneto.plot.slice_plot(G, ax, nodelabels=[0,1,2,3], timelabels=[0,1,2], cmap='Set2') >>> plt.tight_layout() >>> fig.show() .. plot:: import numpy as np import matplotlib.pyplot as plt import teneto G = np.zeros([4, 4, 3]) G[0, 1, [0, 2]] = 1 G[0, 3, [2]] = 1 G[1, 2, [1]] = 1 G[2, 3, [1]] = 1 fig,ax = plt.subplots(1) teneto.plot.slice_plot(G,ax,nodelabels=[0,1,2,3],timelabels=[0,1,2],cmap='Set2') plt.tight_layout() fig.show() Here we can visualize what the shortest paths are. Let us start by starting at node 0 we want to find the path to node 3, starting at time 0. To do this we write: >>> sp = teneto.networkmeasures.shortest_temporal_path(G, i=0, j=3, it=0) >>> sp['temporal-distance'] 0 2 Name: temporal-distance, dtype: int64 >>> sp['topological-distance'] 0 3 Name: topological-distance, dtype: int64 >>> sp['path includes'] 0 [[0, 1], [1, 2], [2, 3]] Name: path includes, dtype: object Here we see that the shortest path takes 3 steps (topological distance of 3) at 2 time points. It starts by going from node 0 to 1 at t=0, then 1 to 2 and 2 to 3 at t=1. We can see all the nodes that were travelled in the "path includes" list. In the above example, it was possible to traverse multiple edges at a single time-point. It is possible to restrain that by setting the steps_per_t argument >>> sp = teneto.networkmeasures.shortest_temporal_path(G, i=0, j=3, it=0, steps_per_t=1) >>> sp['temporal-distance'] 0 3 Name: temporal-distance, dtype: int64 >>> sp['topological-distance'] 0 1 Name: topological-distance, dtype: int64 >>> sp['path includes'] 0 [[0, 3]] Name: path includes, dtype: object Here we see that the path is now only one edge, 0 to 3 at t=2. The quicker path is no longer possible. 
""" tnet = process_input(tnet, ['C', 'G', 'TN'], 'TN') # If i, j or it are inputs, process them if i is None: source_nodes = np.arange(tnet.netshape[0]) elif isinstance(i, int): source_nodes = [i] elif isinstance(i, list): source_nodes = i else: raise ValueError('Unknown i input. Should be None, int or list') if j is None: target_nodes = np.arange(tnet.netshape[0]) elif isinstance(j, int): target_nodes = [j] elif isinstance(j, list): target_nodes = j else: raise ValueError('Unknown j input. Should be None, int or list') if it is None: time_points = np.arange(tnet.netshape[1]) elif isinstance(it, int): time_points = [it] elif isinstance(it, list): time_points = it else: raise ValueError('Unknown t input. Should be None, int or list') # Two step process. # First, get what the network can reach per timepoint. # Second, check all possible sequences of what the network can reach for the shortest sequence. paths = [] for source in source_nodes: for target in target_nodes: if target == source: pass else: for tstart in time_points: # Part 1 starts here ij = [source] t = tstart step = 1 lenij = 1 pairs = [] stop = 0 while stop == 0: # Only select i if directed, ij if undirected. if tnet.nettype[1] == 'u': network = tnet.get_network_when(ij=list(ij), t=t) elif tnet.nettype[1] == 'd': network = tnet.get_network_when(i=list(ij), t=t) new_nodes = network[['i', 'j']].values if len(new_nodes) != 0: pairs.append(new_nodes.tolist()) new_nodes = new_nodes.flatten() ij = np.hstack([ij, new_nodes]) ij = np.unique(ij) if minimise == 'time' and target in ij: stop = 1 elif minimise == 'topology' and t == tnet.netshape[1] and target in ij: stop = 1 elif t == tnet.netshape[1]: t = np.nan ij = [target] stop = 1 else: if len(ij) == lenij: t += 1 step = 1 elif steps_per_t == 'all': pass elif step < steps_per_t: step += 1 else: t += 1 step = 1 lenij = len(ij) # correct t for return t += 1 # Path 2 starts here path = np.nan pl = np.nan for n in itertools.product(*reversed(pairs)): a = np.array(n).flatten() if source not in a or target not in a: pass else: pathtmp = shortest_path_from_pairseq(a, source) if pathtmp: if not isinstance(path, list): path = pathtmp pl = len(path) elif len(pathtmp) < pl: path = pathtmp pl = len(path) elif len(pathtmp) == pl: if isinstance(path[0][0], list): if pathtmp in path: pass else: path.append(pathtmp) else: if path == pathtmp: pass else: path = [path, pathtmp] # elif sourcei < 2 and target in a[:2]: # pl = 2 paths.append([source, target, tstart, t-tstart, pl, path]) paths = pd.DataFrame(data=paths, columns=[ 'from', 'to', 't_start', 'temporal-distance', 'topological-distance', 'path includes']) return paths
def intercontacttimes(netin):
    """
    Calculates the intercontacttimes of each edge in a network.

    **PARAMETERS**

    :netin: Temporal network (graphlet or contact).

        :nettype: 'bu', 'bd'

    **OUTPUT**

    :contacts: intercontact times as numpy array

        :format: dictionary

    **NOTES**

    Connections are assumed to be binary.

    **SEE ALSO**

    *bursty_coeff*

    **History**

    :Modified: Dec 2016, WHT
    :Created: Nov 2016, WHT

    """
    # Process input
    netin, netinfo = utils.process_input(netin, ['C', 'G', 'TO'])

    if netinfo['nettype'][0] == 'd':
        print('WARNING: assuming connections to be binary when computing intercontacttimes')

    # Each edge time series is padded with a 0 at the start and end. Then t[0:-1]-[t:].
    # The noninformative differences are then discarded (done automatically).
    # Finally, return as an np array.
    contacts = np.array([[None] * netinfo['netshape'][0]] * netinfo['netshape'][1])
    if netinfo['nettype'][1] == 'u':
        for i in range(0, netinfo['netshape'][0]):
            for j in range(i + 1, netinfo['netshape'][0]):
                edge_on = np.where(netin[i, j, :] > 0)[0]
                edge_on = np.append(0, edge_on)
                edge_on = np.append(edge_on, 0)
                edge_on_diff = edge_on[2:-1] - edge_on[1:-2]
                contacts[i, j] = np.array(edge_on_diff)
                contacts[j, i] = np.array(edge_on_diff)
    elif netinfo['nettype'][1] == 'd':
        for i in range(0, netinfo['netshape'][0]):
            for j in range(0, netinfo['netshape'][0]):
                edge_on = np.where(netin[i, j, :] > 0)[0]
                edge_on = np.append(0, edge_on)
                edge_on = np.append(edge_on, 0)
                edge_on_diff = edge_on[2:-1] - edge_on[1:-2]
                contacts[i, j] = np.array(edge_on_diff)

    out = {}
    out['intercontacttimes'] = contacts
    out['nettype'] = netinfo['nettype']
    return out
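For a single edge, the inter-contact times are simply the differences between successive activation time indices, which is what the padding-and-subtraction above computes; a minimal sketch:

>>> import numpy as np
>>> edge = np.array([0, 1, 0, 0, 1, 1, 0, 0, 0, 1])   # one edge over 10 time points
>>> on = np.where(edge > 0)[0]                         # active at t = 1, 4, 5, 9
>>> print(np.diff(on))
[3 1 4]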
def sid(tnet, communities, axis=0, calc='global', decay=0): r""" Segregation integration difference (SID). An estimation of each community or global difference of within versus between community strength.[sid-1]_ Parameters ---------- tnet: array, dict Temporal network input (graphlet or contact). Allowerd nettype: 'bu', 'bd', 'wu', 'wd' communities : array a Nx1 vector or NxT array of community assignment. axis : int Dimension that is returned 0 or 1 (default 0). Note, only relevant for directed networks. i.e. if 0, node i has Aijt summed over j and t. and if 1, node j has Aijt summed over i and t. calc : str 'global' returns temporal degree centrality (a 1xnode vector) (default); 'community_pairs' returns a community x community x time matrix, which is the SID for each community pairing; 'community_avg' (returns a community x time matrix). Which is the normalized average of each community to all other communities. decay: int if calc = 'time', then decay is possible where the centrality of the previous time point is carried over to the next time point but decays at a value of $e^decay$ such that the temporal centrality measure becomes: $D_d(t+1) = e^{-decay}D_d(t) + D(t+1)$. Returns ------- sid: array segregation-integration difference. Format: 2d or 3d numpy array (depending on calc) representing (community,community,time) or (community,time) Notes ------ SID tries to quantify if there is more segergation or intgration compared to other time-points. If SID > 0, then there is more segregation than usual. If SID < 0, then there is more integration than usual. There are three different variants of SID, one is a global measure (calc='global'), the second is a value per community (calc='community_avg'), the third is a value for each community-community pairing (calc='community_pairs'). First we calculate the temporal strength for each edge. This is calculate by .. math:: S_{i,t} = \sum_j G_{i,j,t} The pairwise SID, when the network is undirected, is calculated by .. math:: SID_{A,B,t} = ({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A * N_B}}) S_{A,B,t}) Where :math:`S_{A,t}` is the average temporal strength at time-point t for community A. :math:`N_A` is the number of nodes in community A. When calculating the SID for a community, it is calculated byL .. math:: SID_{A,t} = \sum_b^C({2 \over {N_A (N_A - 1)}}) S_{A,t} - ({{1} \over {N_A * N_b}}) S_{A,b,t}) Where C is the number of communities. When calculating the SID globally, it is calculated byL .. math:: SID_{t} = \sum_a^C\sum_b^C({2 \over {N_a (N_a - 1)}}) S_{A,t} - ({{1} \over {N_a * N_b}}) S_{a,b,t}) References ----------- .. [sid-1] Fransson et al (2018) Brain network segregation and integration during an epoch-related working memory fMRI experiment. Neuroimage. 178. 
[`Link <https://www.sciencedirect.com/science/article/pii/S1053811918304476>`_] """ tnet, netinfo = utils.process_input(tnet, ['C', 'G', 'TN']) D = temporal_degree_centrality( tnet, calc='time', communities=communities, decay=decay) # Check network output (order of communitiesworks) network_ids = np.unique(communities) communities_size = np.array([sum(communities == n) for n in network_ids]) sid = np.zeros([network_ids.max()+1, network_ids.max()+1, tnet.shape[-1]]) for n in network_ids: for m in network_ids: betweenmodulescaling = 1/(communities_size[n]*communities_size[m]) if netinfo['nettype'][1] == 'd': withinmodulescaling = 1 / \ (communities_size[n]*communities_size[n]) elif netinfo['nettype'][1] == 'u': withinmodulescaling = 2 / \ (communities_size[n]*(communities_size[n]-1)) if n == m: betweenmodulescaling = withinmodulescaling sid[n, m, :] = withinmodulescaling * \ D[n, n, :] - betweenmodulescaling * D[n, m, :] # If nans emerge than there is no connection between networks at time point, so make these 0. sid[np.isnan(sid)] = 0 if calc == 'global': return np.sum(np.sum(sid, axis=1), axis=0) elif calc == 'communities_avg': return np.sum(sid, axis=axis) else: return sid
def shortest_temporal_path(netin, quiet=1): """ Calculates the shortest temporal path when all possible routes can be travelled at each time point. Currently only works for binary undirected edges (but can be expanded). **PARAMETERS** :netin: temporal network input (graphlet or contact) :nettype: 'bu' :quiet: quiet (default = 1). Turn to 0 if you want progree update. **OUTPUT** :paths_dict: shortest temporal paths :format: dictionary Paths are of the struction, path['paths'][i,j,t] - shortest path for i to reach j, starting at time t. **NOTE** This function assumes all paths can be taken per time point. In a future update, this function temporalPaths will allow for only a portion of edges to be travelled per time point. This will be implmeneted with no change to the funcitonality of calling this function as it is today, with the defaults being all edges can be travelled. **SEE ALSO** - *temporal_efficiency* - *reachability_latency* - *temporal_closeness_centrality* **HISTORY** Modified - Aug 2017, WHT (PEP8) Modified - Dec 2016, WHT (documentation) Created - Nov 2016, WHT """ # Get input type (C or G) # Process input netin, netinfo = utils.process_input(netin, ['C', 'G', 'TO']) if netinfo['nettype'] != 'bu': errormsg = ('It looks like your graph is not binary and undirected. ' 'Shortest temporal paths can only be calculated for ' 'binary undirected networks in Teneto at the moment. ' 'If another type is required, please create an issue at: ' 'github.com/wiheto/teneto.') raise ValueError(errormsg) # Preallocate output paths = np.zeros(netinfo['netshape']) * np.nan # Go backwards in time and see if something is reached paths_last_contact = np.zeros( [netinfo['netshape'][0], netinfo['netshape'][1]]) * np.nan for t_ind in list(reversed(range(0, netinfo['netshape'][2]))): if quiet == 0: print('--- Running for time: ' + str(t_ind) + ' ---') fid = np.where(netin[:, :, t_ind] >= 1) # Update time step # Note to self: Add a conditional to prevent nan warning # that can pop out if no path is there straight away. paths_last_contact += 1 # Reset connections present to 1s paths_last_contact[fid[0], fid[1]] = 1 # Update nodes with no connections # Nodes to update are nodes with an edge present at the time point for i in np.unique(fid[0]): connections = np.where(paths_last_contact[i, :] == 1)[0] #paths_last_contact_preupdate = np.array(paths_last_contact[i,:]) paths_last_contact[i, :] = np.nanmin( paths_last_contact[np.hstack([connections, i]), :], axis=0) # make self connection nan regardless paths_last_contact[i, i] = np.nan paths[:, :, t_ind] = paths_last_contact # Return output paths_dict = {} percentreach = (paths.size - np.sum(np.isnan(paths))) / \ (paths.size - (paths.shape[0] * paths.shape[2])) paths_dict['percentReached'] = percentreach paths_dict['paths'] = paths paths_dict['nettype'] = netinfo['nettype'] return paths_dict
def sid(net, communities, subnet=None, axis=0, calc='global', decay=None): """ Segregation integration difference (SID). An estimation of each community or global difference of within versus between community strength. Parameters ---------- net: array, dict Temporal network input (graphlet or contact). Allowerd nettype: 'bu', 'bd', 'wu', 'wd' communities : a Nx1 vector or NxT array of community assignment. subnet : array a Nx1 vector or NxT array of community assignment (will be removed in v0.3.5). axis : int Dimension that is returned 0 or 1 (default 0). Note, only relevant for directed networks. i.e. if 0, node i has Aijt summed over j and t. and if 1, node j has Aijt summed over i and t. calc : str 'global' returns temporal degree centrality (a 1xnode vector) (default); 'community_pairs' returns a community x community x time matrix, which is the SID for each community pairing; 'community_avg' (returns a community x time matrix). Which is the normalized average of each community to all other communities. decay: str if calc = 'time', then decay is possible where the centrality of the previous time point is carried over to the next time point but decays at a value of $e^decay$ such that the temporal centrality measure becomes: $D_d(t+1) = e^{-decay}D_d(t) + D(t+1)$. Returns ------- sid: array segregation-integration difference. Format: 2d or 3d numpy array (depending on calc) representing (community,community,time) or (community,time) Source ------ Fransson et al (2018) Brain network segregation and integration during an epoch-related working memory fMRI experiment. https://www.biorxiv.org/content/early/2018/01/23/252338 """ if subnet is not None: warnings.warn( "Subnet argument will be removed in v0.3.5. Use communities instead.", FutureWarning) communities = subnet net, netinfo = utils.process_input(net, ['C', 'G', 'TO']) D = temporal_degree_centrality(net, calc='time', communities=communities, decay=decay) # Check network output (order of communitiesworks) network_ids = np.unique(communities) communities_size = np.array([sum(communities == n) for n in network_ids]) sid = np.zeros( [network_ids.max() + 1, network_ids.max() + 1, net.shape[-1]]) for n in network_ids: for m in network_ids: betweenmodulescaling = 1 / (communities_size[n] * communities_size[m]) if netinfo['nettype'][1] == 'd': withinmodulescaling = 1 / (communities_size[n] * communities_size[n]) elif netinfo['nettype'][1] == 'u': withinmodulescaling = 2 / (communities_size[n] * (communities_size[n] - 1)) if n == m: betweenmodulescaling = withinmodulescaling sid[n, m, :] = withinmodulescaling * D[ n, n, :] - betweenmodulescaling * D[n, m, :] # If nans emerge than there is no connection between networks at time point, so make these 0. sid[np.isnan(sid)] = 0 if calc == 'global': return np.sum(np.sum(sid, axis=1), axis=0) elif calc == 'communities_avg': return np.sum(sid, axis=axis) else: return sid