import graph_tool.all as gt


def check_aperiodic(g):
    # For a strongly connected graph, having closed walks of both length 2
    # and length 3 forces the gcd of cycle lengths to be 1, i.e. aperiodicity.
    a = gt.adjacency(g)
    b = a * a
    diag_two_sum = b.diagonal().sum()    # number of closed walks of length 2
    # print('\tA*A diag sum:', int(diag_two_sum))
    b *= a
    diag_three_sum = b.diagonal().sum()  # number of closed walks of length 3
    return bool(diag_two_sum) and bool(diag_three_sum)
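A minimal usage sketch (the graph below is illustrative):

g = gt.Graph(directed=True)
g.add_edge_list([(0, 1), (1, 2), (2, 0), (0, 2)])  # a 3-cycle plus a 2-cycle between 0 and 2
print(check_aperiodic(g))  # True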
Example no. 2
    @classmethod
    def feat_diffusion(cls, X, g=None, D_inv=None, A=None, iter=10):
        if iter != 0:
            if g is None and (D_inv is None or A is None):
                print('input at least either g or D_inv & A')
                return None

            if A is None:
                A = gt.adjacency(g)
            if D_inv is None:
                # TODO: maybe need to change here when using an undirected graph
                D_inv = np.diag(1.0 / g.get_in_degrees(g.get_vertices()))

            # Repeatedly apply the normalized propagation X <- D^-1 A X
            for i in range(iter):
                X = D_inv.dot(A.dot(X))
        return X
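A minimal call sketch (FeatureMixin is a hypothetical holder for this classmethod; X is a dense matrix with one feature row per vertex):

X = np.random.rand(g.num_vertices(), 16)
X_diffused = FeatureMixin.feat_diffusion(X, g=g, iter=2)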
Example no. 3
    def _gen(self, gname: str, gen_id: int) -> nx.Graph:
        import graph_tool.all as gt  # local import

        assert 'state' in self.params, 'missing parameter: state for SBM'
        state = self.params['state']

        gen_gt_g = gt.generate_sbm(
            state.b.a,
            gt.adjacency(state.get_bg(),
                         state.get_ers()).T)  # returns a graph-tool graph
        g = graphtool_to_networkx(gen_gt_g)
        g.name = gname
        g.gen_id = gen_id

        return g
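For context, a sketch of how such a state might be obtained (assuming an existing graph-tool graph g; gt.minimize_blockmodel_dl is graph-tool's standard SBM inference entry point):

import graph_tool.all as gt

state = gt.minimize_blockmodel_dl(g)  # fit a stochastic block model to g
# state.b.a       -> per-vertex block labels
# state.get_bg()  -> the block-level multigraph
# state.get_ers() -> inter-block edge counts (an edge property of the block graph)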
Example no. 4
def get_modified_adjacency_matrix(g, k):
    # Get regular adjacency matrix
    adj = gt.adjacency(g)

    # Initialize the modified adjacency matrix: entries with no edge
    # (including the diagonal) get k times the maximum edge weight
    X = np.full(adj.shape, k * adj.max())

    # Entries with an edge get the reciprocal of the edge weight
    for i, j in zip(*adj.nonzero()):
        X[i, j] = 1 / adj[i, j]

    return X
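A call sketch (illustrative; k scales the pseudo-distance assigned to vertex pairs with no edge):

X = get_modified_adjacency_matrix(g, k=2.0)
# X[i, j] == 1 / w(i, j) where an edge exists, else 2.0 * max edge weight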
Example no. 5
def adjacency(gdf: gpd.GeoDataFrame, directed: bool = False, weight: str = None, include_vertices_position: bool = False) -> (csr_matrix, list):
    """Create a weighted adjacency matrix from a graph.

    The adjacency matrix is a square |vertices| x |vertices| matrix whose
    entries encode the edges.

    Parameters
    ----------
    gdf : :class:`~geopandas.GeoDataFrame`
        A geopandas dataframe with the list of edges
    directed : bool (default False)
        Whether the graph is directed
    weight : string (default None)
        Column of the geodataframe to use as edge weight
    include_vertices_position : bool (default False)
        Also return a dictionary of positions for the vertices ({v: (x, y)})

    Returns
    -------
    adj : :class:`~scipy.sparse.csr_matrix`

    """
    g = gt.Graph(directed=directed)
    weight_prop = None

    if weight is not None:
        if weight not in gdf.columns:
            logging.error('{0} is not in geodataframe columns {1}'.format(weight, gdf.columns))
            return None, None

        # np.int was removed from NumPy; test the dtype kind instead
        if np.issubdtype(gdf.dtypes[weight], np.integer):
            weight_prop = g.new_edge_property('int')
        else:
            weight_prop = g.new_edge_property('float')

        edgelist = gdf.reset_index()[['u', 'v', weight]].values
        nodes_id = g.add_edge_list(edgelist, hashed=True, eprops=[weight_prop])
    else:
        edgelist = gdf.reset_index()[['u', 'v']].values
        nodes_id = g.add_edge_list(edgelist, hashed=True)

    if not include_vertices_position:
        return gt.adjacency(g, weight_prop), None
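A usage sketch (hypothetical frame; a real edge GeoDataFrame would typically also carry a geometry column):

import geopandas as gpd

edges = gpd.GeoDataFrame({'u': [0, 1, 2], 'v': [1, 2, 0], 'length': [1.0, 2.5, 3.0]})
adj, _ = adjacency(edges, weight='length')
print(adj.toarray())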
Example no. 6
e5 = G.add_edge(v3, v5)
e6 = G.add_edge(v5, v3)
e7 = G.add_edge(v1, v3)
e8 = G.add_edge(v3, v1)

vlen = len(G.get_vertices())

# Figure 1
gt.graph_draw(G,
              vertex_text=G.vertex_index,
              vertex_font_size=18,
              output_size=(800, 800),
              output="example2.pdf")

# A will be the adjacency matrix of G
A = gt.adjacency(G)

with open('log.txt', 'w') as file_w:
    file_w.write("A_G=\n")
    file_w.write(str(A.todense().transpose()))
    file_w.write("\n")

L = gt.laplacian(G, deg='total', normalized=False, weight=None,
                 index=None)  # unnormalized Laplacian
L = L - A.transpose()

with open('log.txt', 'a') as file_w:  # text mode: we write str, not bytes
    file_w.write("L=\n")
    file_w.write(str(L.todense()))
    file_w.write("\n")
Example no. 7
vcc_path.remove(num_vert)


g.remove_vertex(num_vert+1)
g.remove_vertex(num_vert)

plane = [i for i in range(Lx*Ly, 2*Lx*Ly + 1)
         if i in g.vertex(i - Lx*Ly).all_neighbours() and i in vcc_path]

top = range(Lx*Ly)
bot = range(l - Lx*Ly, l)

others = vcc_path - set(top) - set(bot)
fixed = set(range(l)) - others

adj = gt.adjacency(g)
del g

resnet.csr_rows_set_nz_to_val(adj, fixed)  # nodes not in vcc_path or those on the top/bottom

diag = adj.sum(axis=1)

log.info("creating PETSc structures")
b = PETSc.Vec().createSeq(l)
x = PETSc.Vec().createSeq(l)
#A = PETSc.Mat().createAIJ(size=adj.shape,nnz=7,csr=(adj.indptr,adj.indices,-adj.data))
A = PETSc.Mat().createAIJ(size=adj.shape,nnz=7)

# the gm factor here and below doesn't actually matter
# scaling each resistor by the same constant still produces
# the same distribution of voltages
Example no. 8
def weisfeiler_lehman_subtree_kernel(graph_db, hashed_attributes, *args):
    iterations = args[0]
    compute_gram_matrix = args[1]
    normalize_gram_matrix = args[2]
    use_labels = args[3]

    # Create one empty feature vector for each graph
    feature_vectors = []
    for _ in graph_db:
        feature_vectors.append(np.zeros(0, dtype=np.float64))

    # Construct block diagonal matrix of all adjacency matrices
    adjacency_matrices = []
    for g in graph_db:
        adjacency_matrices.append(gt.adjacency(g))
    M = sp.sparse.block_diag(tuple(adjacency_matrices),
                             dtype=np.float64,
                             format="csr")
    num_vertices = M.shape[0]

    # Load list of precalculated logarithms of prime numbers
    log_primes = log_pl.log_primes[0:num_vertices]

    # Color vector representing labels
    colors_0 = np.zeros(num_vertices, dtype=np.float64)
    # Color vector representing hashed attributes
    colors_1 = hashed_attributes

    # Get labels (colors) from all graph instances
    offset = 0
    graph_indices = []

    for g in graph_db:
        if use_labels == 1:
            for i, v in enumerate(g.vertices()):
                colors_0[i + offset] = g.vp.nl[v]
        if use_labels == 2:
            for i, v in enumerate(g.vertices()):
                colors_0[i + offset] = v.out_degree()

        graph_indices.append((offset, offset + g.num_vertices() - 1))
        offset += g.num_vertices()

    # Map labels to [0, number_of_colors)
    if use_labels:
        _, colors_0 = np.unique(colors_0, return_inverse=True)

    for it in range(0, iterations + 1):

        if use_labels:
            # Map colors into a single color vector
            if len(colors_1) > 0:
                colors_all = np.array([colors_0, colors_1])
            else:
                colors_all = np.array([colors_0])
            colors_all = [hash(tuple(row)) for row in colors_all.T]
            _, colors_all = np.unique(colors_all, return_inverse=True)
            max_all = int(np.amax(colors_all) + 1)
            # max_all = int(np.amax(colors_0) + 1)

            feature_vectors = [
                np.concatenate((feature_vectors[i],
                                np.bincount(colors_all[index[0]:index[1] + 1],
                                            minlength=max_all)))
                for i, index in enumerate(graph_indices)
            ]

            # Avoid coloring computation in last iteration
            if it < iterations:
                colors_0 = compute_coloring(M, colors_0,
                                            log_primes[0:len(colors_0)])
                if len(colors_1) > 0:
                    colors_1 = compute_coloring(M, colors_1,
                                                log_primes[0:len(colors_1)])
        else:
            max_1 = int(np.amax(colors_1) + 1)

            feature_vectors = [
                np.concatenate((feature_vectors[i],
                                np.bincount(colors_1[index[0]:index[1] + 1],
                                            minlength=max_1)))
                for i, index in enumerate(graph_indices)
            ]

            # Avoid coloring computation in last iteration
            if it < iterations:
                colors_1 = compute_coloring(M, colors_1,
                                            log_primes[0:len(colors_1)])

    if not compute_gram_matrix:
        return feature_vectors
        # return lil.lil_matrix(feature_vectors, dtype=np.float64)
    else:
        # Make feature vectors sparse
        gram_matrix = csr.csr_matrix(feature_vectors, dtype=np.float64)
        # Compute gram matrix
        gram_matrix = gram_matrix.dot(gram_matrix.T)

        gram_matrix = gram_matrix.toarray()

        if normalize_gram_matrix:
            return aux.normalize_gram_matrix(gram_matrix)
        else:
            return gram_matrix
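A call sketch (hypothetical inputs; graphs is a list of graph-tool graphs, use_labels=2 colors vertices by out-degree so no vertex-label property is required, and an empty hashed_attributes array skips the attribute channel):

gram = weisfeiler_lehman_subtree_kernel(
    graphs,
    np.zeros(0, dtype=np.int64),  # no hashed attributes
    3,      # iterations
    True,   # compute_gram_matrix
    True,   # normalize_gram_matrix
    2)      # use_labels: 2 = out-degree colors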
Example no. 9
def generate_power_law_bipartite_net(N, frac_left_node, gamma, ave_deg,
                                     min_deg_left, min_deg_right, node_class):
    """
    Generate power law bipartite network.

    Parameters
    ----------
    N : int
        Number of nodes
    frac_left_node : float
        Fraction of nodes on the left part.
    gamma : float
        Power-law exponent (same exponent for both sides)
    ave_deg : float
        Average degree
    min_deg_left : int
        Minimum degree for nodes on the left part
    min_deg_right : int
        Minimum degree for nodes on the right part
    node_class : list of str
        Name of the class for the left and right nodes
        node_class[0] : str for left nodes.
        node_class[1] : str for right nodes.

    Returns
    -------
    G : networkx.Graph
    """
    def zipf(a, min, max, size=None):
        """
        Generate Zipf-like random variables,
        but in inclusive [min...max] interval
        """
        v = np.arange(min, max + 1)  # values to sample
        p = 1.0 / np.power(v, a)  # probabilities
        p /= np.sum(p)  # normalized
        return np.random.choice(v, size=size, replace=True, p=p)

    def add_n_stubs(deg, n):
        """
        Add n stubs to the degree sequence
        """
        stubs = np.random.choice(np.arange(len(deg)),
                                 size=int(n),
                                 replace=True,
                                 p=deg / np.sum(deg))
        for s in stubs:
            deg[s] += 1
        return deg

    def to_graphical_deg_seq(deg_left, deg_right):
        """
        Make the degree sequence to be graphical
        by adding edges
        """
        deg_left_sum = np.sum(deg_left)
        deg_right_sum = np.sum(deg_right)

        if deg_left_sum < deg_right_sum:
            deg_left = add_n_stubs(deg_left, deg_right_sum - deg_left_sum)
        elif deg_left_sum > deg_right_sum:
            deg_right = add_n_stubs(deg_right, deg_left_sum - deg_right_sum)

        return deg_left, deg_right

    # Compute the number of nodes
    N_left = int(N * frac_left_node)
    N_right = N - N_left

    # Generate degree sequences (use the prescribed power-law exponent)
    deg_left = zipf(gamma, min_deg_left, N_right, size=N_left)
    deg_right = zipf(gamma, min_deg_right, N_left, size=N_right)

    # Rescale such that the average degree is the prescribed average degree
    E = ave_deg * (N_left + N_right)
    deg_left = np.clip(np.round(deg_left * E / np.sum(deg_left)), min_deg_left,
                       N_right)
    deg_right = np.clip(np.round(deg_right * E / np.sum(deg_right)),
                        min_deg_right, N_left)

    # Make them graphical degree sequences
    deg_left, deg_right = to_graphical_deg_seq(deg_left, deg_right)

    # Prepare parameters for graph-tool
    E = np.sum(deg_right)
    gt_params = {
        "out_degs":
        np.concatenate([np.zeros_like(deg_left), deg_right]).astype(int),
        "in_degs":
        np.concatenate([deg_left, np.zeros_like(deg_right)]).astype(int),
        "b":
        np.concatenate([np.zeros(N_left), np.ones(N_right)]),
        "probs":
        np.array([[0, 0], [E, 0]]),
        "directed":
        True,
        "micro_degs":
        True,
    }

    # Generate the network until the degree sequence
    # satisfies the thresholds
    while True:
        g = gt.generate_sbm(**gt_params)
        A = gt.adjacency(g).T
        A.data = np.ones_like(A.data)
        outdeg = np.array(A.sum(axis=1)).reshape(-1)[N_left:]
        indeg = np.array(A.sum(axis=0)).reshape(-1)[:N_left]
        if (np.min(indeg) >= min_deg_left) and (np.min(outdeg) >=
                                                min_deg_right):
            break

    # Convert to a networkx object
    # (nx.from_scipy_sparse_matrix was removed in NetworkX 3)
    G = nx.from_scipy_sparse_array(A, create_using=nx.Graph)

    # Add attributes to the nodes (left nodes are 0 .. N_left-1)
    node_attr = {i: node_class[int(i >= N_left)] for i in range(N)}
    nx.set_node_attributes(G, node_attr, "class")
    return G
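A call sketch (illustrative parameters):

G = generate_power_law_bipartite_net(
    N=1000, frac_left_node=0.5, gamma=3.0, ave_deg=5.0,
    min_deg_left=1, min_deg_right=1,
    node_class=["left", "right"])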
Example no. 10
def main():
    import sys
    import os.path
    import glob
    import itertools
    from argparse import ArgumentParser

    parser = ArgumentParser(
        description='Read a graph, and produce a layout with t-SNE.')

    # Input
    parser.add_argument(
        'graphs',
        nargs='+',
        help='(List of) input graph(s). Or a folder with graphs.')

    # Output
    parser.add_argument('-o',
                        default='./output',
                        help='Folder to write output to. Default: ./output')
    parser.add_argument('--save_every',
                        type=int,
                        help='Save a jpg snapshot every x epochs.')
    parser.add_argument(
        '--render_video',
        action='store_true',
        help=
        'Render a video of the layout evolution. Needs ImageMagick and ffmpeg.'
    )
    parser.add_argument(
        '--retain_snaps',
        action='store_true',
        help=
        'Retain the snapshots. This argument is ignored if no video is rendered.'
    )
    parser.add_argument(
        '--save_layout_data',
        action='store_true',
        help='Save all layout coordinates in a .pickle file and a .txt file.')
    parser.add_argument('--opacity',
                        type=float,
                        default=0.3,
                        help='Edge opacity.')

    # Manipulations to graph
    parser.add_argument(
        '--strip_graph',
        action='store_true',
        help='Retain only the largest connected component in the graph.')
    parser.add_argument('--rnd_seed',
                        '-r',
                        type=int,
                        nargs='+',
                        default=[None],
                        help='Seed for random state. (Default: Random seed)')
    parser.add_argument(
        '--pre_sfdp',
        action='store_true',
        help=
        'If this flag is given, the vertices will be pre-initialized with SFDP.'
    )
    parser.add_argument('--only_sfdp',
                        action='store_true',
                        help='If this flag is given, only SFDP will be done.')
    parser.add_argument(
        '--accept_all_sfdp',
        action='store_true',
        help=
        'If this flag is given, no confirmation is asked for the SFDP layouts.'
    )
    parser.add_argument(
        '--remove_rnd_edges',
        nargs='+',
        type=float,
        default=[0],
        help=
        'Mutate the graph by removing random edges. If this is used without a random seed, a random random seed will be generated. The value given to this argument is the fraction of edges that will be removed.'
    )

    # Hyperparameters
    parser.add_argument('--n_epochs',
                        '-e',
                        nargs='+',
                        type=int,
                        default=[1000],
                        help='One or more numbers of t-SNE epochs.')
    parser.add_argument('--lr_init',
                        nargs='+',
                        type=float,
                        default=[80],
                        help='One or more initial learning rates.')
    parser.add_argument(
        '--lr_final',
        nargs='+',
        type=float,
        default=[None],
        help='One or more final learning rates. Default: Same as lr_init.')
    parser.add_argument('--lr_switch',
                        nargs='+',
                        type=int,
                        default=[None],
                        help='One or more learning rate switch-points.')
    parser.add_argument('--momentum_init',
                        nargs='+',
                        type=float,
                        default=[0.5],
                        help='One or more initial momenta.')
    parser.add_argument('--momentum_final',
                        nargs='+',
                        type=float,
                        default=[0.5],
                        help='One or more final momenta.')
    parser.add_argument('--momentum_switch',
                        nargs='+',
                        type=int,
                        default=[None],
                        help='One or more momentum switch-points.')

    # Distance metric parameters
    parser.add_argument(
        '--distance_metric',
        '-d',
        choices=['shortest_path', 'spdm', 'modified_adjacency', 'mam'],
        default='spdm',
        help='The distance metric that is used for the pairwise distances.')
    parser.add_argument('-k',
                        nargs='+',
                        type=float,
                        default=[1],
                        help='Exponent for transfer function.')

    # Cost function parameters
    #   Kullback-Leibler
    parser.add_argument('--perplexity',
                        '-p',
                        nargs='+',
                        type=float,
                        default=[80],
                        help='One or more perplexities.')
    parser.add_argument('--l_kl_init',
                        nargs='+',
                        type=float,
                        default=[1],
                        help='One or more KL factors.')
    parser.add_argument('--l_kl_final',
                        nargs='+',
                        type=float,
                        default=[1],
                        help='One or more KL factors.')
    parser.add_argument('--l_kl_switch',
                        nargs='+',
                        type=int,
                        default=[None],
                        help='One or more KL switch-points')
    #   Edge contraction
    parser.add_argument('--l_e_init',
                        nargs='+',
                        type=float,
                        default=[0],
                        help='One or more edge contraction factors.')
    parser.add_argument('--l_e_final',
                        nargs='+',
                        type=float,
                        default=[0],
                        help='One or more edge contraction factors.')
    parser.add_argument('--l_e_switch',
                        nargs='+',
                        type=int,
                        default=[None],
                        help='One or more edge contraction switch-points')
    #   Compression
    parser.add_argument('--l_c_init',
                        nargs='+',
                        type=float,
                        default=[1.2],
                        help='One or more compression factors.')
    parser.add_argument('--l_c_final',
                        nargs='+',
                        type=float,
                        default=[0],
                        help='One or more compression factors.')
    parser.add_argument('--l_c_switch',
                        nargs='+',
                        type=int,
                        default=[None],
                        help='One or more compression switch-points')
    #   Repulsion
    parser.add_argument('--l_r_init',
                        nargs='+',
                        type=float,
                        default=[0],
                        help='One or more repulsion factors.')
    parser.add_argument('--l_r_final',
                        nargs='+',
                        type=float,
                        default=[0.5],
                        help='One or more repulsion factors.')
    parser.add_argument('--l_r_switch',
                        nargs='+',
                        type=int,
                        default=[None],
                        help='One or more repulsion switch-points')
    parser.add_argument(
        '--r_eps',
        nargs='+',
        type=float,
        default=[0.2],
        help='Additional term in denominator to prevent near-singularities.')

    args = parser.parse_args()

    # Retrieve a list of all files in the directory, if args.graphs[0] is a directory.
    if len(args.graphs) == 1 and os.path.isdir(args.graphs[0]):
        args.graphs = glob.glob(args.graphs[0] + '/*')

    # Check graph input
    for g_file in args.graphs:
        if not os.path.isfile(g_file):
            raise FileNotFoundError(g_file + ' is not a file.')

    # Generate random random seed if none is given.
    if args.rnd_seed == [None]:
        args.rnd_seed = [np.random.randint(int(1e8))]

    # Ignore retain_snaps argument if no video is rendered.
    if not args.render_video:
        args.retain_snaps = True

    # Get names of the graphs (by splitting off the path and extension)
    names = [
        os.path.split(os.path.splitext(file)[0])[1] for file in args.graphs
    ]

    # Determine output folders. One is created in the specified output folder
    # for every graph that is supplied.
    output_folders = [args.o + '/' + name for name in names]

    # Check (and possibly create) output folders
    for folder in [args.o] + output_folders:
        if not os.path.exists(folder):
            os.makedirs(folder)

    # At least everything is fine for now.
    there_were_exceptions = False

    # Loop over all graphs (and their respective output folders)
    for g_file, g_name, output_folder in zip(args.graphs, names,
                                             output_folders):
        # Load the graph
        g = graph_io.load_graph(g_file)
        print(
            '[tsnetwork] Loaded graph {0} (|V| = {1}, |E| = {2}) into memory.'.
            format(g_name, g.num_vertices(), g.num_edges()))

        # Add graph name as property in the internal representation
        g.graph_properties['name'] = g.new_graph_property('string', g_name)

        # Usually this loop has just one iteration, with only 0 as the value
        # for rmv_edge_frac (that is, no edges are removed).
        for rmv_edge_frac in args.remove_rnd_edges:
            print(
                '[tsnetwork] Original graph: (|V|, |E|) = ({0}, {1}).'.format(
                    g.num_vertices(), g.num_edges()))

            # Create a temporary copy of the graph that will be manipulated.
            gv = gt.GraphView(g)

            # Remove rmv_edge_frac of the graph's edges from gv.
            gv.clear_filters()
            gv.reindex_edges()
            edge_list = list(gv.edges())
            not_here_ep = gv.new_edge_property('bool', val=True)
            n_remove_edges = int(rmv_edge_frac * gv.num_edges())
            for e in np.random.randint(0, gv.num_edges(), n_remove_edges):
                not_here_ep[edge_list[e]] = False
            gv.set_edge_filter(not_here_ep)

            if n_remove_edges > 0:
                print(
                    '[tsnetwork] Removed {2} random edges: (|V|, |E|) = ({0}, {1}).'
                    .format(gv.num_vertices(), gv.num_edges(), n_remove_edges))

            # Filter the graph s.t. only the largest connected component
            # remains.
            if args.strip_graph:
                largest_connected_component = gt.label_largest_component(gv)
                gv.set_vertex_filter(largest_connected_component)
                gv.purge_vertices()
                print(
                    '[tsnetwork] Filtered largest component: (|V|, |E|) = ({0}, {1}).'
                    .format(gv.num_vertices(), gv.num_edges()))

            if args.pre_sfdp or args.only_sfdp:
                # Perform a SFDP layout (either as the only layout or as a
                # starting point for t-SNE.)
                Y_init, _ = sfdp_placement(
                    gv,
                    output_folder,
                    ask_for_acceptance=not args.accept_all_sfdp,
                    opacity=args.opacity)
                if args.only_sfdp:
                    continue
            else:
                # Random positions will be generated
                Y_init = None

            # Compute distance matrix of this graph with the specified metric
            X = distance_matrix.get_distance_matrix(gv, args.distance_metric)

            # Retrieve the adjacency matrix of the graph as a dense float32 array
            Adj = gt.adjacency(gv).toarray().astype('float32')

            # Make list of tsnetwork configuration objects. These are objects
            # that represent a configuration for a t-SNE layout.
            tsn_configs = []
            for perplexity, n_epochs, initial_lr, final_lr, lr_switch, initial_momentum,\
                final_momentum, momentum_switch,\
                initial_l_kl, final_l_kl, l_kl_switch,\
                initial_l_e, final_l_e, l_e_switch,\
                initial_l_c, final_l_c, l_c_switch,\
                initial_l_r, final_l_r, l_r_switch,\
                r_eps, k, rnd_seed in itertools.product(
                    args.perplexity, args.n_epochs, args.lr_init, args.lr_final,
                    args.lr_switch, args.momentum_init, args.momentum_final,
                    args.momentum_switch,
                    args.l_kl_init, args.l_kl_final, args.l_kl_switch,
                    args.l_e_init, args.l_e_final, args.l_e_switch,
                    args.l_c_init, args.l_c_final, args.l_c_switch,
                    args.l_r_init, args.l_r_final, args.l_r_switch,
                    args.r_eps, args.k, args.rnd_seed):

                # Use 50% for the switching points if no argument is given
                if lr_switch is None:
                    lr_switch = int(n_epochs * 0.5)
                if momentum_switch is None:
                    momentum_switch = int(n_epochs * 0.5)
                if l_kl_switch is None:
                    l_kl_switch = int(n_epochs * 0.5)
                if l_e_switch is None:
                    l_e_switch = int(n_epochs * 0.5)
                if l_c_switch is None:
                    l_c_switch = int(n_epochs * 0.5)
                if l_r_switch is None:
                    l_r_switch = int(n_epochs * 0.5)

                if final_lr is None:
                    final_lr = initial_lr

                cfg = TsnConfig(perplexity=perplexity,
                                n_epochs=n_epochs,
                                initial_lr=initial_lr,
                                final_lr=final_lr,
                                lr_switch=lr_switch,
                                initial_momentum=initial_momentum,
                                final_momentum=final_momentum,
                                momentum_switch=momentum_switch,
                                initial_l_kl=initial_l_kl,
                                final_l_kl=final_l_kl,
                                l_kl_switch=l_kl_switch,
                                initial_l_e=initial_l_e,
                                final_l_e=final_l_e,
                                l_e_switch=l_e_switch,
                                initial_l_c=initial_l_c,
                                final_l_c=final_l_c,
                                l_c_switch=l_c_switch,
                                initial_l_r=initial_l_r,
                                final_l_r=final_l_r,
                                l_r_switch=l_r_switch,
                                r_eps=r_eps,
                                k=k,
                                pre_sfdp=args.pre_sfdp,
                                rmv_edge_frac=rmv_edge_frac,
                                rnd_seed=rnd_seed,
                                distance_matrix=args.distance_metric)

                # Do not add configurations that already have files matching
                # the description, unless the user confirms overwriting.
                if any([
                        file.startswith(cfg.get_description() + '.')
                        for file in os.listdir(output_folder)
                ]):
                    if not usr_input.confirm('[tsnetwork] ' +
                                             cfg.get_description() +
                                             ' files exists! Overwrite?'):
                        continue
                tsn_configs.append(cfg)

            # Loop over the t-SNE configurations for a single graph
            for cfg in tsn_configs:
                print('[tsnetwork] Processing: ' + cfg.get_description())

                # Path to the directory where the snapshots will be
                # written (if --save_every is given).
                snaps_dir = output_folder + '/snaps_' + cfg.get_description()

                # Clean out existing snaps directory if it exists.
                if args.save_every is not None and os.path.exists(snaps_dir):
                    if usr_input.confirm('[tsnetwork] ' + snaps_dir +
                                         ' exists. Delete contents?'):
                        for file in os.listdir(snaps_dir):
                            file_path = os.path.join(snaps_dir, file)
                            try:
                                if os.path.isfile(file_path):
                                    os.unlink(file_path)
                                elif os.path.isdir(file_path):
                                    shutil.rmtree(file_path)
                            except Exception as e:
                                print(e)
                elif args.save_every is not None and not os.path.exists(
                        snaps_dir):
                    # Make folder for snaps, if it is necessary and it doesn't
                    # exist yet.
                    os.makedirs(snaps_dir)

                # Apply the transfer function
                X_transfered = X**cfg.k

                # Try to do the tsne layout.
                try:
                    Y, costs = thesne.tsne(
                        X_transfered,
                        random_state=cfg.rnd_seed,
                        perplexity=cfg.perplexity,
                        n_epochs=cfg.n_epochs,
                        Y=Y_init,
                        initial_lr=cfg.initial_lr,
                        final_lr=cfg.final_lr,
                        lr_switch=cfg.lr_switch,
                        initial_momentum=cfg.initial_momentum,
                        final_momentum=cfg.final_momentum,
                        momentum_switch=cfg.momentum_switch,
                        initial_l_kl=cfg.initial_l_kl,
                        final_l_kl=cfg.final_l_kl,
                        l_kl_switch=cfg.l_kl_switch,
                        initial_l_e=cfg.initial_l_e,
                        final_l_e=cfg.final_l_e,
                        l_e_switch=cfg.l_e_switch,
                        initial_l_c=cfg.initial_l_c,
                        final_l_c=cfg.final_l_c,
                        l_c_switch=cfg.l_c_switch,
                        initial_l_r=cfg.initial_l_r,
                        final_l_r=cfg.final_l_r,
                        l_r_switch=cfg.l_r_switch,
                        r_eps=cfg.r_eps,
                        Adj=Adj,
                        g=gv,
                        snaps_output_folder=snaps_dir,
                        save_every=args.save_every)
                except (thesne.NaNException, thesne.SigmaTooLowException) as e:
                    there_were_exceptions = True
                    print('[exception] {0}'.format(e))

                    # Also write exception to a file.
                    with open(
                            output_folder + '/exception_' +
                            cfg.get_description() + '.out', 'w') as f:
                        print('{0}'.format(e), file=f)
                    print('[tsnetwork] Continuing with next TsnConfig.')
                    continue

                # Render an animation of the snapshots
                if args.render_video:
                    animations.save_animation(snaps_dir, cfg.get_description())

                # Remove the directory with snapshots.
                if args.save_every is not None and not args.retain_snaps and os.path.exists(
                        snaps_dir):
                    print('[tsnetwork] Cleaning up snaps directory.')
                    shutil.rmtree(snaps_dir)

                # Save the data (graph, vertex coordinates)
                if args.save_layout_data:
                    layout_io.save_vna_layout(
                        output_folder + '/layout_' + cfg.get_description() +
                        '.vna', gv, Y)
                    layout_io.save_layout_txt(
                        output_folder + '/layout_edges_' +
                        cfg.get_description() + '.txt', gv, Y)

                # Save final drawing of the layout
                layout_io.save_drawing(output_folder,
                                       gv,
                                       Y.T,
                                       cfg.get_description(),
                                       formats=['jpg', 'pdf'],
                                       edge_colors="rgb",
                                       draw_vertices=False,
                                       opacity=args.opacity)

    if there_were_exceptions:
        print('[tsnetwork] Done! However, be wary. There were exceptions.')
    else:
        print('[tsnetwork] Done!')
Example no. 11
def graph_union(g, h):
    """Return a copy of g with the edges of h that g lacks added."""
    g = gt.Graph(g, prune=True)
    in_h_notin_g = (gt.adjacency(h) - gt.adjacency(g)).toarray() > 0
    edges = np.transpose(np.triu(in_h_notin_g, 1).nonzero())
    g.add_edge_list(edges)
    return g
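A minimal sketch (assuming both graphs are undirected and defined on the same vertex set):

g = gt.Graph(directed=False)
g.add_vertex(3)
g.add_edge_list([(0, 1)])
h = gt.Graph(directed=False)
h.add_edge_list([(0, 1), (1, 2)])
u = graph_union(g, h)  # u keeps (0, 1) and gains (1, 2)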
Example no. 12
def shortest_path_kernel(graph_db, hashed_attributes, *args):
    compute_gram_matrix = args[0]
    normalize_gram_matrix = args[1]
    use_labels = args[2]

    num_vertices = 0
    for g in graph_db:
        num_vertices += g.num_vertices()

    offset = 0
    graph_indices = []
    colors_0 = np.zeros(num_vertices, dtype=np.int64)

    # Get labels (colors) from all graph instances
    offset = 0
    for g in graph_db:
        graph_indices.append((offset, offset + g.num_vertices() - 1))

        if use_labels == 1:
            for i, v in enumerate(g.vertices()):
                colors_0[i + offset] = g.vp.nl[v]
        if use_labels == 2:
            for i, v in enumerate(g.vertices()):
                colors_0[i + offset] = v.out_degree()

        offset += g.num_vertices()
    _, colors_0 = np.unique(colors_0, return_inverse=True)

    colors_1 = hashed_attributes

    triple_indices = []
    triple_offset = 0
    triples = []

    # Solve the APSP problem for every graph in the graph database
    for i, g in enumerate(graph_db):
        a = gt.adjacency(g)
        M = csg.shortest_path(a, method='J', directed=False, unweighted=True)  # Johnson's algorithm

        index = graph_indices[i]

        if use_labels:
            l = colors_0[index[0]:index[1] + 1]
            h = colors_1[index[0]:index[1] + 1]
        else:
            h = colors_1[index[0]:index[1] + 1]
        d = M.shape[0]

        # For each pair of vertices collect labels, hashed attributes, and shortest-path distance
        pairs = list(it.product(range(d), repeat=2))
        if use_labels:
            t = [
                hash((l[k], h[k], l[j], h[j], M[k][j])) for (k, j) in pairs
                if (k != j or ~np.isinf(M[k][j]))
            ]
        else:
            t = [
                hash((h[k], h[j], M[k][j])) for (k, j) in pairs
                if (k != j or ~np.isinf(M[k][j]))
            ]

        triples.extend(t)

        triple_indices.append((triple_offset, triple_offset + len(t) - 1))
        triple_offset += len(t)

    _, colors = np.unique(triples, return_inverse=True)
    m = np.amax(colors) + 1

    # Compute feature vectors
    feature_vectors = []
    for i, index in enumerate(triple_indices):
        feature_vectors.append(
            np.bincount(colors[index[0]:index[1] + 1], minlength=m))

    if not compute_gram_matrix:
        return lil.lil_matrix(feature_vectors, dtype=np.float64)
    else:
        # Make feature vectors sparse
        gram_matrix = csr.csr_matrix(feature_vectors, dtype=np.float64)
        # Compute gram matrix
        gram_matrix = gram_matrix.dot(gram_matrix.T)

        gram_matrix = gram_matrix.toarray()

        if normalize_gram_matrix:
            return aux.normalize_gram_matrix(gram_matrix)
        else:
            return gram_matrix
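The calling convention mirrors the Weisfeiler-Lehman kernel above. A sketch (hashed_colors is a hypothetical array with one integer color per vertex, concatenated over all graphs in graph_db):

gram = shortest_path_kernel(
    graphs, hashed_colors,
    True,   # compute_gram_matrix
    True,   # normalize_gram_matrix
    0)      # use_labels: 0 = use hashed attributes only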
Example no. 13
Nres = args.N # number of lattice sites on an edge
M = 2 # edge length (w.r.t. Nphys) of cubes to be removed, total vol removed is M**3
p = args.p # volume fraction
tol = 1e-3 # desired tolerance on volume fraction for generation

log.info("Nphys = %d, Nres = %d, M = %d, p = %f" % (Nphys,Nres,M,p))

flags = resnet.discrete_pore_space(Nphys,M,p,tol)

# generate a matrix with True volume fraction close to desired value

log.info("generating lattice")

g = gt.lattice([Nres,Nres,Nres])

mat = triu(gt.adjacency(g))

bonds = np.vstack([mat.row,mat.col])

r1 = 8e-9
r2 = 25e-9

log.info("warping lattice")
x,y,z = resnet.bonds_to_xyz(bonds,Nres,r1,r2)

# coordinates inside a certain radius
# inner = np.argwhere(np.sqrt(x**2 + y**2 + z**2) < (r1 + 0.2*(r2-r1))).flatten()

# fit lattice into space of 'physical matrix' indices
x = ne.evaluate('(x/r2/2+0.5)*(Nphys-1)')
y = ne.evaluate('(y/r2/2+0.5)*(Nphys-1)')
Example no. 14
def F(x, y, threshold):
    # x and y are adjacency-matrix rows; their elementwise product
    # counts the neighbors the two vertices share
    common_neighbors = np.sum(x * y)
    return bool(common_neighbors >= threshold)


A = args.alpha
B = args.beta


def P(k, alpha=A, beta=B):
    return 1 - (1 - beta) * (1 - alpha)**k
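A quick numeric check of P (illustrative alpha and beta, overriding the argparse defaults):

print(P(0, alpha=0.1, beta=0.05))  # ≈ 0.05: with k = 0 only the baseline beta remains
print(P(1, alpha=0.1, beta=0.05))  # ≈ 0.145 = 1 - 0.95 * 0.9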


adj = gp.adjacency(graph).toarray()  # adjacency matrix
N = graph.num_vertices()

one = np.ones_like(adj)
disjoin = one - adj  # matrix of non-neighbors

pairs = 0
sameType_pairs = 0
expected_pairs = 0
for i in range(N):
    for j in range(i + 1, N):  # sum over j > i
        if disjoin[i, j] and F(
                adj[i, :], adj[j, :], args.threshold
        ):  # non-neighbors that satisfy F are the pairs of interest
            pairs += 1
            if graph.vp.essential[i] == graph.vp.essential[