Example #1
    def __init__(self, N=64, Nc=2, regular=False, n_try=50,
                 distribute=False, connected=True, seed=None, **kwargs):

        self.Nc = Nc
        self.regular = regular
        self.n_try = n_try
        self.distribute = distribute
        self.seed = seed

        self.logger = utils.build_logger(__name__)

        if connected:
            for x in range(self.n_try):
                W, coords = self._create_weight_matrix(N, distribute,
                                                       regular, Nc)
                self.W = W

                if self.is_connected(recompute=True):
                    break

                elif x == self.n_try - 1:
                    self.logger.warning('Graph is not connected.')
        else:
            W, coords = self._create_weight_matrix(N, distribute, regular, Nc)

        W = sparse.lil_matrix(W)
        W = utils.symmetrize(W, method='average')

        gtype = 'regular sensor' if self.regular else 'sensor'

        plotting = {'limits': np.array([0, 1, 0, 1])}

        super(Sensor, self).__init__(W=W, coords=coords, gtype=gtype,
                                     plotting=plotting, **kwargs)
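
A minimal usage sketch for this constructor, assuming the class is exposed as pygsp.graphs.Sensor (PyGSP, where these snippets originate):

from pygsp import graphs

G = graphs.Sensor(N=64, seed=42)  # N and seed as in the signature above
print(G.N, G.Ne)                  # vertex and edge counts
print(G.coords.shape)             # (64, 2): positions in the unit square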
Example #2
    def __init__(self, W, gtype='unknown', lap_type='combinatorial',
                 coords=None, plotting={}, **kwargs):

        self.logger = build_logger(__name__, **kwargs)

        shapes = np.shape(W)
        if len(shapes) != 2 or shapes[0] != shapes[1]:
            self.logger.error('W has incorrect shape {}'.format(shapes))

        self.N = shapes[0]
        self.W = sparse.lil_matrix(W)
        self.A = self.W > 0

        self.Ne = self.W.nnz
        self.d = self.A.sum(axis=1)
        self.gtype = gtype
        self.lap_type = lap_type

        self.is_connected()
        self.create_laplacian(lap_type)

        if isinstance(coords, np.ndarray) and 2 <= len(np.shape(coords)) <= 3:
            self.coords = coords
        else:
            self.coords = np.ndarray(None)

        # Plotting default parameters
        self.plotting = {'vertex_size': 10, 'edge_width': 1,
                         'edge_style': '-', 'vertex_color': 'b'}

        if isinstance(plotting, dict):
            self.plotting.update(plotting)
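
A sketch of exercising this constructor directly with a small weight matrix; the attribute names follow the code above, the rest is a hedged example:

import numpy as np
from pygsp import graphs

# 3-vertex path graph given as a dense, symmetric weight matrix.
W = np.array([[0, 1, 0],
              [1, 0, 2],
              [0, 2, 0]])
G = graphs.Graph(W)
print(G.N)  # 3 vertices
print(G.d)  # binary degrees computed from A = W > 0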
Example #3
    def __init__(self, N=64, k=6, maxIter=10, seed=None, **kwargs):
        self.k = k

        self.logger = utils.build_logger(__name__)

        rs = np.random.RandomState(seed)

        # N * k must be even for a k-regular graph on N vertices to exist
        if (N * k) % 2 == 1:
            raise ValueError("input error: N*k must be even!")

        # a list of open half-edges (k copies of each vertex index)
        U = np.kron(np.ones(k), np.arange(N)).astype(int)

        # the graph's adjacency matrix
        A = sparse.lil_matrix((N, N))

        edgesTested = 0
        repetition = 1

        while np.size(U) and repetition < maxIter:
            edgesTested += 1

            # log progress
            if edgesTested % 5000 == 0:
                self.logger.debug("createRandRegGraph() progress: edges= "
                                  "{}/{}.".format(edgesTested, N * k // 2))

            # choose two half-edges at random
            i1 = rs.randint(0, np.shape(U)[0])
            i2 = rs.randint(0, np.shape(U)[0])
            v1 = U[i1]
            v2 = U[i2]

            # check that the pick is neither a self-loop nor a parallel edge
            if v1 == v2 or A[v1, v2] == 1:
                # restart process if needed
                if edgesTested == N * k:
                    repetition = repetition + 1
                    edgesTested = 0
                    U = np.kron(np.ones(k), np.arange(N)).astype(int)
                    A = sparse.lil_matrix((N, N))
            else:
                # add edge to graph
                A[v1, v2] = 1
                A[v2, v1] = 1

                # remove used half-edges
                v = sorted([i1, i2])
                U = np.concatenate((U[:v[0]], U[v[0] + 1:v[1]], U[v[1] + 1:]))

        super(RandomRegular, self).__init__(W=A,
                                            gtype="random_regular",
                                            **kwargs)

        self.is_regular()
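
The loop above implements the pairing (configuration) model: each vertex starts with k open half-edges, and random pairs are matched while rejecting self-loops and parallel edges. A hedged sanity check that the result is k-regular, assuming the class is exposed as pygsp.graphs.RandomRegular:

import numpy as np
from pygsp import graphs

G = graphs.RandomRegular(N=64, k=6, seed=0)
degrees = np.asarray(G.W.sum(axis=1)).ravel()
print(np.unique(degrees))  # ideally a single value: [6.]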
Example #4
    def __init__(self, N=64, k=6, max_iter=10, seed=None, **kwargs):

        self.k = k
        self.max_iter = max_iter
        self.seed = seed

        self.logger = utils.build_logger(__name__)

        rs = np.random.RandomState(seed)

        # N * k must be even for a k-regular graph on N vertices to exist
        if (N * k) % 2 == 1:
            raise ValueError("input error: N*k must be even!")

        # a list of open half-edges (k copies of each vertex index)
        U = np.kron(np.ones(k), np.arange(N)).astype(int)

        # the graph's adjacency matrix
        A = sparse.lil_matrix((N, N))

        edgesTested = 0
        repetition = 1

        while np.size(U) and repetition < max_iter:
            edgesTested += 1

            if edgesTested % 5000 == 0:
                self.logger.debug("createRandRegGraph() progress: edges= "
                                  "{}/{}.".format(edgesTested, N*k/2))

            # choose two half-edges at random
            i1 = rs.randint(0, np.shape(U)[0])
            i2 = rs.randint(0, np.shape(U)[0])
            v1 = U[i1]
            v2 = U[i2]

            # check that the pick is neither a self-loop nor a parallel edge
            if v1 == v2 or A[v1, v2] == 1:
                # restart process if needed
                if edgesTested == N*k:
                    repetition = repetition + 1
                    edgesTested = 0
                    U = np.kron(np.ones(k), np.arange(N)).astype(int)
                    A = sparse.lil_matrix((N, N))
            else:
                # add edge to graph
                A[v1, v2] = 1
                A[v2, v1] = 1

                # remove used half-edges
                v = sorted([i1, i2])
                U = np.concatenate((U[:v[0]], U[v[0] + 1:v[1]], U[v[1] + 1:]))

        super(RandomRegular, self).__init__(A, **kwargs)

        self.is_regular()
Example #5
    def __init__(self, N=64, k=6, **kwargs):
        self.k = k

        # Build the logger first, as createRandRegGraph needs it
        self.logger = build_logger(__name__, **kwargs)

        W = self.createRandRegGraph(N, k)

        super(RandomRegular, self).__init__(W=W, gtype="random_regular",
                                            **kwargs)
Example #6
    def __init__(self, W, lap_type='combinatorial', coords=None, plotting={}):

        self.logger = utils.build_logger(__name__)

        if len(W.shape) != 2 or W.shape[0] != W.shape[1]:
            raise ValueError('W has incorrect shape {}'.format(W.shape))

        # CSR sparse matrices are the most efficient for matrix multiplication.
        # They are the sole sparse matrix type to support eliminate_zeros().
        if sparse.isspmatrix_csr(W):
            self.W = W
        else:
            self.W = sparse.csr_matrix(W)

        # Don't keep edges of 0 weight. Otherwise Ne will not correspond to the
        # real number of edges. Problematic when e.g. plotting.
        self.W.eliminate_zeros()

        self.n_nodes = W.shape[0]

        # TODO: why would we ever want this?
        # For large matrices it slows the graph construction by a factor 100.
        # self.W = sparse.lil_matrix(self.W)

        # Don't count edges two times if undirected.
        # Be consistent with the size of the differential operator.
        if self.is_directed():
            self.n_edges = self.W.nnz
        else:
            diagonal = np.count_nonzero(self.W.diagonal())
            off_diagonal = self.W.nnz - diagonal
            self.n_edges = off_diagonal // 2 + diagonal

        self.check_weights()

        self.compute_laplacian(lap_type)

        if coords is not None:
            self.coords = coords

        self.plotting = {
            'vertex_size': 100,
            'vertex_color': (0.12, 0.47, 0.71, 1),
            'edge_color': (0.5, 0.5, 0.5, 1),
            'edge_width': 1,
            'edge_style': '-'
        }
        self.plotting.update(plotting)

        # TODO: kept for backward compatibility.
        self.Ne = self.n_edges
        self.N = self.n_nodes
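
A standalone check of the undirected edge count used above: a symmetric matrix stores every off-diagonal edge twice but each self-loop once, hence off_diagonal // 2 + diagonal.

import numpy as np
from scipy import sparse

# Symmetric weights: one edge (0, 1) and one self-loop (2, 2).
W = sparse.csr_matrix(np.array([[0, 1, 0],
                                [1, 0, 0],
                                [0, 0, 3]]))
diagonal = np.count_nonzero(W.diagonal())
off_diagonal = W.nnz - diagonal
print(off_diagonal // 2 + diagonal)  # 2: the edge plus the loop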
Example #7
    def __init__(self, G, filters=None, **kwargs):

        self.logger = utils.build_logger(__name__, **kwargs)

        if not hasattr(G, 'lmax'):
            self.logger.info('{}: has to compute lmax'.format(
                self.__class__.__name__))
            G.estimate_lmax()

        self.G = G

        if filters:
            if isinstance(filters, list):
                self.g = filters
            else:
                self.g = [filters]
        else:
            self.g = []
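
A minimal sketch of building such a filter bank from kernel callables; the g attribute is taken from the code above, while the graph construction is a hedged assumption:

import numpy as np
from pygsp import graphs, filters

G = graphs.Sensor(N=64, seed=1)
# Two spectral kernels to be evaluated on the Laplacian eigenvalues.
bank = filters.Filter(G, [lambda x: np.exp(-x),
                          lambda x: x * np.exp(-x)])
print(len(bank.g))  # 2 kernels stored on the filter bank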
Example #8
    def __init__(self,
                 W,
                 gtype='unknown',
                 lap_type='combinatorial',
                 coords=None,
                 plotting={}):

        self.logger = utils.build_logger(__name__)

        if len(W.shape) != 2 or W.shape[0] != W.shape[1]:
            raise ValueError('W has incorrect shape {}'.format(W.shape))

        # Don't keep edges of 0 weight. Otherwise Ne will not correspond to the
        # real number of edges. Problematic when e.g. plotting.
        W = sparse.csr_matrix(W)
        W.eliminate_zeros()

        self.N = W.shape[0]
        self.W = sparse.lil_matrix(W)

        # Don't count edges two times if undirected.
        # Be consistent with the size of the differential operator.
        if self.is_directed():
            self.Ne = self.W.nnz
        else:
            self.Ne = sparse.tril(W).nnz

        self.check_weights()

        self.gtype = gtype

        self.compute_laplacian(lap_type)

        if coords is not None:
            self.coords = coords

        self.plotting = {
            'vertex_size': 100,
            'vertex_color': (0.12, 0.47, 0.71, 1),
            'edge_color': (0.5, 0.5, 0.5, 1),
            'edge_width': 1,
            'edge_style': '-'
        }
        self.plotting.update(plotting)
Example #9
# -*- coding: utf-8 -*-

import numpy as np
from scipy import sparse, linalg

from pygsp import utils

logger = utils.build_logger(__name__)


class FourierMixIn(object):
    def _check_fourier_properties(self, name, desc):
        if getattr(self, '_' + name) is None:
            self.logger.warning('The {} G.{} is not available, we need to '
                                'compute the Fourier basis. Explicitly call '
                                'G.compute_fourier_basis() once beforehand '
                                'to suppress the warning.'.format(desc, name))
            self.compute_fourier_basis()
        return getattr(self, '_' + name)

    @property
    def U(self):
        r"""Fourier basis (eigenvectors of the Laplacian).

        Is computed by :meth:`compute_fourier_basis`.
        """
        return self._check_fourier_properties('U', 'Fourier basis')

    @property
    def e(self):
        r"""Eigenvalues of the Laplacian (square of graph frequencies).
Example #10
# -*- coding: utf-8 -*-
r"""This module contains functionalities for the reduction of graphs' vertex set while keeping the graph structure."""

from pygsp.utils import resistance_distance, build_logger
from pygsp.graphs import Graph
from pygsp.filters import Filter

import numpy as np
from scipy import sparse, stats
from scipy.sparse.linalg import eigs, spsolve

logger = build_logger(__name__)


def graph_sparsify(M, epsilon, maxiter=10):
    r"""
    Sparsify a graph using the Spielman-Srivastava algorithm.

    Parameters
    ----------
    M : Graph or sparse matrix
        Graph structure or a Laplacian matrix
    epsilon : float
        Sparsification parameter

    Returns
    -------
    Mnew : Graph or sparse matrix
        New graph structure or sparse matrix

    Note
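
A hedged usage sketch: in PyGSP this function is exposed in pygsp.reduction, and the Spielman-Srivastava analysis requires epsilon to lie between 1/sqrt(N) and 1.

from pygsp import graphs, reduction

G = graphs.Sensor(N=256, seed=42)
# Larger epsilon -> more aggressive sparsification.
G2 = reduction.graph_sparsify(G, epsilon=0.9)
print(G.Ne, G2.Ne)  # edge counts before and after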
Example #11
    def __init__(self,
                 adjacency,
                 lap_type='combinatorial',
                 coords=None,
                 plotting={}):

        self.logger = utils.build_logger(__name__)

        if not sparse.isspmatrix(adjacency):
            adjacency = np.asanyarray(adjacency)

        if (adjacency.ndim != 2) or (adjacency.shape[0] != adjacency.shape[1]):
            raise ValueError('Adjacency: must be a square matrix.')

        # CSR sparse matrices are the most efficient for matrix multiplication.
        # They are the sole sparse matrix type to support eliminate_zeros().
        self._adjacency = sparse.csr_matrix(adjacency, copy=False)

        if np.isnan(self._adjacency.sum()):
            raise ValueError('Adjacency: there is a Not a Number (NaN).')
        if np.isinf(self._adjacency.sum()):
            raise ValueError('Adjacency: there is an infinite value.')
        if self.has_loops():
            self.logger.warning('Adjacency: there are self-loops '
                                '(non-zeros on the diagonal). '
                                'The Laplacian will not see them.')
        if (self._adjacency < 0).nnz != 0:
            self.logger.warning('Adjacency: there are negative edge weights.')

        self.n_vertices = self._adjacency.shape[0]

        # Don't keep edges of 0 weight. Otherwise n_edges will not correspond
        # to the real number of edges. Problematic when plotting.
        self._adjacency.eliminate_zeros()

        self._directed = None
        self._connected = None

        # Don't count edges two times if undirected.
        # Be consistent with the size of the differential operator.
        if self.is_directed():
            self.n_edges = self._adjacency.nnz
        else:
            diagonal = np.count_nonzero(self._adjacency.diagonal())
            off_diagonal = self._adjacency.nnz - diagonal
            self.n_edges = off_diagonal // 2 + diagonal

        if coords is not None:
            # TODO: self.coords should be None if unset.
            self.coords = np.asanyarray(coords)

        self.plotting = {
            'vertex_size': 100,
            'vertex_color': (0.12, 0.47, 0.71, 0.5),
            'edge_color': (0.5, 0.5, 0.5, 0.5),
            'edge_width': 2,
            'edge_style': '-',
            'highlight_color': 'C1',
            'normalize_intercept': .25,
        }
        self.plotting.update(plotting)
        self.signals = dict()

        # Attributes that are lazily computed.
        self._A = None
        self._d = None
        self._dw = None
        self._lmax = None
        self._lmax_method = None
        self._U = None
        self._e = None
        self._coherence = None
        self._D = None
        # self._L = None

        # TODO: what about Laplacian? Lazy as Fourier, or disallow change?
        self.lap_type = lap_type
        self.compute_laplacian(lap_type)

        # TODO: kept for backward compatibility.
        self.Ne = self.n_edges
        self.N = self.n_vertices
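
A quick sketch of this newer constructor: any square array-like is accepted and converted to CSR internally, and expensive attributes are computed lazily. Attribute names follow the code above.

from pygsp import graphs

G = graphs.Graph([[0, 1, 0],
                  [1, 0, 1],
                  [0, 1, 0]])
print(G.n_vertices, G.n_edges)  # 3 2
print(G.is_directed())          # False: the matrix is symmetric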
Example #12
# -*- coding: utf-8 -*-

r"""
The :mod:`pygsp.optimization` module provides tools to solve convex
optimization problems on graphs.
"""

from pygsp import utils


logger = utils.build_logger(__name__)


def _import_pyunlocbox():
    try:
        from pyunlocbox import functions, solvers
    except Exception as e:
        raise ImportError('Cannot import pyunlocbox, which is needed to solve '
                          'this optimization problem. Try to install it with '
                          'pip (or conda) install pyunlocbox. '
                          'Original exception: {}'.format(e))
    return functions, solvers


def prox_tv(x, gamma, G, A=None, At=None, nu=1, tol=10e-4, maxit=200, use_matrix=True):
    r"""
    Total Variation proximal operator for graphs.

    This function computes the TV proximal operator for graphs. The TV norm
    is the one norm of the gradient. The gradient is defined in the
    function :meth:`pygsp.graphs.Graph.grad`.
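
The helper above defers the import of an optional dependency until the feature is actually used, so the package imports cleanly without it. The same pattern with a hypothetical optional dependency (networkx is only an illustration here):

def _import_networkx():
    # Deferred import: fail with installation advice only on first use.
    try:
        import networkx as nx
    except Exception as e:
        raise ImportError('Cannot import networkx. Try to install it with '
                          'pip (or conda) install networkx. '
                          'Original exception: {}'.format(e))
    return nx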
Example #13
    def __init__(self,
                 N=256,
                 Nc=None,
                 min_comm=None,
                 min_deg=None,
                 comm_sizes=None,
                 size_ratio=1,
                 world_density=None,
                 comm_density=None,
                 k_neigh=None,
                 epsilon=None,
                 seed=None,
                 **kwargs):

        if Nc is None:
            Nc = int(round(np.sqrt(N) / 2))
        if min_comm is None:
            min_comm = int(round(N / (3 * Nc)))
        if min_deg is not None:
            raise NotImplementedError
        if world_density is None:
            world_density = 1 / N
        if not 0 <= world_density <= 1:
            raise ValueError('World density should be in [0, 1].')
        if epsilon is None:
            epsilon = np.sqrt(2 * np.sqrt(N)) / 2

        self.Nc = Nc
        self.min_comm = min_comm
        self.comm_sizes = comm_sizes
        self.size_ratio = size_ratio
        self.world_density = world_density
        self.comm_density = comm_density
        self.k_neigh = k_neigh
        self.epsilon = epsilon
        self.seed = seed

        rs = np.random.RandomState(seed)

        self.logger = utils.build_logger(__name__)
        w_data = [[], [[], []]]

        if min_comm * Nc > N:
            raise ValueError('The constraint on minimum size for communities is unsolvable.')

        info = {'node_com': None, 'comm_sizes': None, 'world_rad': None,
                'world_density': world_density, 'min_comm': min_comm}

        # Communities construction #
        if comm_sizes is None:
            mandatory_labels = np.tile(np.arange(Nc), (min_comm,))  # min_comm labels for each of the Nc communities
            remaining_labels = rs.choice(Nc, N - min_comm * Nc)  # random choice for the remaining labels
            info['node_com'] = np.sort(np.concatenate((mandatory_labels, remaining_labels)))
        else:
            if len(comm_sizes) != Nc:
                raise ValueError('There should be Nc community sizes.')
            if np.sum(comm_sizes) != N:
                raise ValueError('The sum of community sizes should be N.')
            # create labels from the given community sizes; no random assignment here.
            info['node_com'] = np.concatenate([[val] * cnt for (val, cnt) in enumerate(comm_sizes)])

        counts = collections.Counter(info['node_com'])
        info['comm_sizes'] = np.array([cnt[1] for cnt in sorted(counts.items())])
        info['world_rad'] = size_ratio * np.sqrt(N)

        # Intra-community edges construction #
        if comm_density is not None:
            # randomly pick edges according to the community density (same for all communities)
            if not 0 <= comm_density <= 1:
                raise ValueError('comm_density should be between 0 and 1.')
            info['comm_density'] = comm_density
            self.logger.info('Constructed using community density = {}'.format(comm_density))
        elif k_neigh is not None:
            # k-NN among the nodes in the same community (same k for all communities)
            if k_neigh < 0:
                raise ValueError('k_neigh cannot be negative.')
            info['k_neigh'] = k_neigh
            self.logger.info('Constructed using K-NN with k = {}'.format(k_neigh))
        else:
            # epsilon-NN among the nodes in the same community (same eps for all communities)
            info['epsilon'] = epsilon
            self.logger.info('Constructed using eps-NN with eps = {}'.format(epsilon))

        # Coordinates #
        info['com_coords'] = info['world_rad'] * np.array(list(zip(
            np.cos(2 * np.pi * np.arange(1, Nc + 1) / Nc),
            np.sin(2 * np.pi * np.arange(1, Nc + 1) / Nc))))

        coords = rs.rand(N, 2)  # nodes' coordinates inside the community
        coords = np.array([[elem[0] * np.cos(2 * np.pi * elem[1]),
                            elem[0] * np.sin(2 * np.pi * elem[1])] for elem in coords])

        for i in range(N):
            # set coordinates as an offset from the center of the community it belongs to
            comm_idx = info['node_com'][i]
            comm_rad = np.sqrt(info['comm_sizes'][comm_idx])
            coords[i] = info['com_coords'][comm_idx] + comm_rad * coords[i]

        first_node = 0
        for i in range(Nc):
            com_siz = info['comm_sizes'][i]
            M = com_siz * (com_siz - 1) / 2

            if comm_density is not None:
                nb_edges = int(comm_density * M)
                tril_ind = np.tril_indices(com_siz, -1)
                indices = rs.permutation(int(M))[:nb_edges]

                w_data[0] += [1] * nb_edges
                w_data[1][0] += [first_node + tril_ind[1][elem] for elem in indices]
                w_data[1][1] += [first_node + tril_ind[0][elem] for elem in indices]

            elif k_neigh is not None:
                comm_coords = coords[first_node:first_node + com_siz]
                kdtree = spatial.KDTree(comm_coords)
                __, indices = kdtree.query(comm_coords, k=k_neigh + 1)

                pairs_set = set()
                for row in indices:
                    for elm in row[1:]:
                        pairs_set.add((min(row[0], elm), max(row[0], elm)))

                w_data[0] += [1] * len(pairs_set)
                w_data[1][0] += [first_node + pair[0] for pair in pairs_set]
                w_data[1][1] += [first_node + pair[1] for pair in pairs_set]

            else:
                comm_coords = coords[first_node:first_node + com_siz]
                kdtree = spatial.KDTree(comm_coords)
                pairs_set = kdtree.query_pairs(epsilon)

                w_data[0] += [1] * len(pairs_set)
                w_data[1][0] += [first_node + elem[0] for elem in pairs_set]
                w_data[1][1] += [first_node + elem[1] for elem in pairs_set]

            first_node += com_siz

        # Inter-community edges construction #
        M = (N**2 - np.sum([com_siz**2 for com_siz in info['comm_sizes']])) / 2
        nb_edges = int(world_density * M)

        if world_density < 0.35:
            # use rejection sampling
            inter_edges = set()
            while len(inter_edges) < nb_edges:
                new_point = rs.randint(0, N, 2)
                if info['node_com'][min(new_point)] != info['node_com'][max(new_point)]:
                    inter_edges.add((min(new_point), max(new_point)))
        else:
            # use random permutation
            indices = rs.permutation(int(M))[:nb_edges]
            all_points, first_col = [], 0
            for i in range(Nc - 1):
                nb_col = info['comm_sizes'][i]
                first_row = np.sum(info['comm_sizes'][:i+1])

                for j in range(i+1, Nc):
                    nb_row = info['comm_sizes'][j]
                    all_points += [(first_row + r, first_col + c) for r in range(nb_row) for c in range(nb_col)]

                    first_row += nb_row
                first_col += nb_col

            inter_edges = np.array(all_points)[indices]

        w_data[0] += [1] * nb_edges
        w_data[1][0] += [elem[0] for elem in inter_edges]
        w_data[1][1] += [elem[1] for elem in inter_edges]

        w_data[0] += w_data[0]
        tmp_w_data = copy.deepcopy(w_data[1][0])
        w_data[1][0] += w_data[1][1]
        w_data[1][1] += tmp_w_data
        w_data[1] = tuple(w_data[1])

        W = sparse.coo_matrix(tuple(w_data), shape=(N, N))

        for key, value in {'Nc': Nc, 'info': info}.items():
            setattr(self, key, value)

        super(Community, self).__init__(W, coords=coords, **kwargs)
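
A minimal usage sketch, assuming the class is exposed as pygsp.graphs.Community:

from pygsp import graphs

G = graphs.Community(N=256, seed=42)  # ~sqrt(N)/2 communities by default
print(G.Nc)                           # number of communities
print(G.info['comm_sizes'])           # vertices per community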
Example #14
    def __init__(self, adjacency, lap_type='combinatorial', coords=None,
                 plotting={}):

        self.logger = utils.build_logger(__name__)

        if not sparse.isspmatrix(adjacency):
            adjacency = np.asanyarray(adjacency)

        if (adjacency.ndim != 2) or (adjacency.shape[0] != adjacency.shape[1]):
            raise ValueError('Adjacency: must be a square matrix.')

        # CSR sparse matrices are the most efficient for matrix multiplication.
        # They are the sole sparse matrix type to support eliminate_zeros().
        self._adjacency = sparse.csr_matrix(adjacency, copy=False)

        if np.isnan(self._adjacency.sum()):
            raise ValueError('Adjacency: there is a Not a Number (NaN).')
        if np.isinf(self._adjacency.sum()):
            raise ValueError('Adjacency: there is an infinite value.')
        if self.has_loops():
            self.logger.warning('Adjacency: there are self-loops '
                                '(non-zeros on the diagonal). '
                                'The Laplacian will not see them.')
        if (self._adjacency < 0).nnz != 0:
            self.logger.warning('Adjacency: there are negative edge weights.')

        self.n_vertices = self._adjacency.shape[0]

        # Don't keep edges of 0 weight. Otherwise n_edges will not correspond
        # to the real number of edges. Problematic when plotting.
        self._adjacency.eliminate_zeros()

        self._directed = None
        self._connected = None

        # Don't count edges two times if undirected.
        # Be consistent with the size of the differential operator.
        if self.is_directed():
            self.n_edges = self._adjacency.nnz
        else:
            diagonal = np.count_nonzero(self._adjacency.diagonal())
            off_diagonal = self._adjacency.nnz - diagonal
            self.n_edges = off_diagonal // 2 + diagonal

        if coords is not None:
            # TODO: self.coords should be None if unset.
            self.coords = np.asanyarray(coords)

        self.plotting = {'vertex_size': 100,
                         'vertex_color': (0.12, 0.47, 0.71, 0.5),
                         'edge_color': (0.5, 0.5, 0.5, 0.5),
                         'edge_width': 2,
                         'edge_style': '-',
                         'highlight_color': 'C1',
                         'normalize_intercept': .25}
        self.plotting.update(plotting)
        self.signals = dict()

        # Attributes that are lazily computed.
        self._A = None
        self._d = None
        self._dw = None
        self._lmax = None
        self._lmax_method = None
        self._U = None
        self._e = None
        self._coherence = None
        self._D = None
        # self._L = None

        # TODO: what about Laplacian? Lazy as Fourier, or disallow change?
        self.lap_type = lap_type
        self.compute_laplacian(lap_type)

        # TODO: kept for backward compatibility.
        self.Ne = self.n_edges
        self.N = self.n_vertices
Example #15
    def __init__(self, N=256, **kwargs):

        # Parameter initialisation #
        N = int(N)
        Nc = int(kwargs.pop('Nc', int(round(np.sqrt(N)/2.))))
        min_comm = int(kwargs.pop('min_comm', int(round(N / (3. * Nc)))))
        min_deg = int(kwargs.pop('min_deg', 0))
        comm_sizes = kwargs.pop('comm_sizes', np.array([]))
        size_ratio = float(kwargs.pop('size_ratio', 1.))
        world_density = float(kwargs.pop('world_density', 1. / N))
        world_density = world_density if 0 <= world_density <= 1 else 1. / N
        comm_density = kwargs.pop('comm_density', None)
        k_neigh = kwargs.pop('k_neigh', None)
        epsilon = float(kwargs.pop('epsilon', np.sqrt(2 * np.sqrt(N)) / 2))

        self.logger = build_logger(__name__, **kwargs)
        w_data = [[], [[], []]]

        try:
            if len(comm_sizes) > 0:
                if np.sum(comm_sizes) != N:
                    raise ValueError('GSP_COMMUNITY: The sum of the community sizes has to be equal to N.')
                if len(comm_sizes) != Nc:
                    raise ValueError('GSP_COMMUNITY: The length of the community sizes has to be equal to Nc.')

        except TypeError:
            raise TypeError("GSP_COMMUNITY: comm_sizes expected to be a list or array, got {}".format(type(comm_sizes)))

        if min_comm * Nc > N:
            raise ValueError('GSP_COMMUNITY: The constraint on minimum size for communities is unsolvable.')

        info = {'node_com': None, 'comm_sizes': None, 'world_rad': None,
                'world_density': world_density, 'min_comm': min_comm}

        # Communities construction #
        if comm_sizes.shape[0] == 0:
            mandatory_labels = np.tile(np.arange(Nc), (min_comm,))  # min_comm labels for each of the Nc communities
            remaining_labels = np.random.choice(Nc, N - min_comm * Nc)  # random choice for the remaining labels
            info['node_com'] = np.sort(np.concatenate((mandatory_labels, remaining_labels)))
        else:
            # create labels from the given community sizes; no random assignment here.
            info['node_com'] = np.concatenate([[val] * cnt for (val, cnt) in enumerate(comm_sizes)])

        counts = Counter(info['node_com'])
        info['comm_sizes'] = np.array([cnt[1] for cnt in sorted(counts.items())])
        info['world_rad'] = size_ratio * np.sqrt(N)

        # Intra-community edges construction #
        if comm_density:
            # randomly pick edges according to the community density (same for all communities)
            comm_density = float(comm_density)
            comm_density = comm_density if 0. <= comm_density <= 1. else 0.1
            info['comm_density'] = comm_density
            self.logger.info("GSP_COMMUNITY: Constructed using community density = {}".format(comm_density))
        elif k_neigh:
            # k-NN among the nodes in the same community (same k for all communities)
            k_neigh = int(k_neigh)
            k_neigh = k_neigh if k_neigh > 0 else 10
            info['k_neigh'] = k_neigh
            self.logger.info("GSP_COMMUNITY: Constructed using K-NN with k = {}".format(k_neigh))
        else:
            # epsilon-NN among the nodes in the same community (same eps for all communities)
            info['epsilon'] = epsilon
            self.logger.info("GSP_COMMUNITY: Constructed using eps-NN with eps = {}".format(epsilon))

        first_node = 0
        for i in range(Nc):
            com_siz = info['comm_sizes'][i]
            M = com_siz * (com_siz - 1) / 2

            if comm_density:
                nb_edges = int(comm_density * M)
                tril_ind = np.tril_indices(com_siz, -1)
                indices = np.random.permutation(int(M))[:nb_edges]

                w_data[0] += [1] * nb_edges
                w_data[1][0] += [first_node + tril_ind[1][elem] for elem in indices]
                w_data[1][1] += [first_node + tril_ind[0][elem] for elem in indices]

            elif k_neigh:
                comm_coords = coords[first_node:first_node + com_siz]
                kdtree = spatial.KDTree(comm_coords)
                __, indices = kdtree.query(comm_coords, k=k_neigh + 1)

                pairs_set = set()
                for row in indices:
                    for elm in row[1:]:
                        pairs_set.add((min(row[0], elm), max(row[0], elm)))

                w_data[0] += [1] * len(pairs_set)
                w_data[1][0] += [first_node + pair[0] for pair in pairs_set]
                w_data[1][1] += [first_node + pair[1] for pair in pairs_set]

            else:
                comm_coords = coords[first_node:first_node + com_siz]
                kdtree = spatial.KDTree(comm_coords)
                pairs_set = kdtree.query_pairs(epsilon)

                w_data[0] += [1] * len(pairs_set)
                w_data[1][0] += [first_node + elem[0] for elem in pairs_set]
                w_data[1][1] += [first_node + elem[1] for elem in pairs_set]

            first_node += com_siz

        # Inter-community edges construction #
        M = (N**2 - np.sum([com_siz**2 for com_siz in info['comm_sizes']])) / 2
        nb_edges = int(world_density * M)

        if world_density < 0.35:
            # use rejection sampling
            inter_edges = set()
            while len(inter_edges) < nb_edges:
                new_point = np.random.randint(0, N, 2)
                if info['node_com'][min(new_point)] != info['node_com'][max(new_point)]:
                    inter_edges.add((min(new_point), max(new_point)))
        else:
            # use random permutation
            indices = np.random.permutation(int(M))[:nb_edges]
            all_points, first_col = [], 0
            for i in range(Nc - 1):
                nb_col = info['comm_sizes'][i]
                first_row = np.sum(info['comm_sizes'][:i+1])

                for j in range(i+1, Nc):
                    nb_row = info['comm_sizes'][j]
                    all_points += [(first_row + r, first_col + c) for r in range(nb_row) for c in range(nb_col)]

                    first_row += nb_row
                first_col += nb_col

            inter_edges = np.array(all_points)[indices]

        w_data[0] += [1] * nb_edges
        w_data[1][0] += [elem[0] for elem in inter_edges]
        w_data[1][1] += [elem[1] for elem in inter_edges]

        w_data[0] += w_data[0]
        tmp_w_data = deepcopy(w_data[1][0])
        w_data[1][0] += w_data[1][1]
        w_data[1][1] += tmp_w_data
        w_data[1] = tuple(w_data[1])

        W = sparse.coo_matrix(tuple(w_data), shape=(N, N))

        for key, value in {'Nc': Nc, 'info': info}.items():
            setattr(self, key, value)

        super(Community, self).__init__(W=W, gtype='Community', **kwargs)