Example #1
def gen_white_noise(n, d, random_state=None):
    """generate white noise, n observations and d features"""
    mean = np.zeros(d)
    cov = np.eye(d)
    if random_state is not None:
        np.random.seed(random_state)
    return to_torch(np.random.multivariate_normal(mean, cov, n))
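A minimal usage sketch, assuming to_torch returns a torch.Tensor:

# Hypothetical usage: 100 observations of 5-dimensional white noise.
X = gen_white_noise(100, 5, random_state=0)
print(X.shape)  # torch.Size([100, 5])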
Example #2
def _check_valid_laplacian(L, tol=1e-3):
    """Validate that L is a symmetric graph Laplacian with zero row sums."""
    L = to_torch(L)
    _check_symmetric(L, 'Laplacian')
    if L.ndim != 2:
        raise ValueError('Laplacian must have two dimensions')
    if torch.any(torch.abs(L.sum(axis=0)) > tol):
        raise ValueError('Laplacian rows do not sum to zero. '
                         'Note that self-loops are not supported.')
Example #3
def _check_valid_adjacency(A, tol=1e-3):
    """Validate that A is a symmetric adjacency matrix with no self-loops."""
    A = to_torch(A)
    _check_symmetric(A, 'Adjacency Matrix')
    if A.ndim != 2:
        raise ValueError('Adjacency Matrix must have two dimensions')
    if torch.any(torch.abs(torch.diag(A)) > tol):
        raise ValueError('Non-zero diagonal entries (i.e. self-loops) '
                         'in the adjacency matrix are not supported.')
    if torch.any(A < 0):
        raise ValueError('Adjacency matrix weights must be non-negative.')
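A quick sanity check of the two validators on a two-node graph; a sketch, assuming to_torch accepts NumPy arrays:

import numpy as np

A = np.array([[0., 1.],
              [1., 0.]])          # symmetric, zero diagonal, non-negative
L = np.diag(A.sum(axis=0)) - A    # combinatorial Laplacian; rows sum to zero

_check_valid_adjacency(A)  # passes silently
_check_valid_laplacian(L)  # passes silently
_check_valid_adjacency(L)  # raises ValueError: non-zero diagonal entries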
Example #4
def gen_dirac(n, d, seed=None, p=None):
    """Generate a sparse signal whose entries are -1 with probability p/2,
    1 with probability p/2, and 0 otherwise."""
    if p is None:
        p = 2 / d

    if seed is not None:
        np.random.seed(seed)
    unif = np.random.rand(n, d)
    out = np.zeros_like(unif)
    out[unif < p / 2] = -1
    out[unif > (1 - p / 2)] = 1
    # Drop all-zero rows, so the result may have fewer than n observations.
    return to_torch(out[np.abs(out).sum(1) > 0])
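A usage sketch; note that all-zero rows are dropped, so fewer than n rows may be returned:

# Hypothetical usage: sparse +/-1 signals on d = 50 nodes.
X = gen_dirac(1000, 50, seed=0)
print(X.shape[0] <= 1000)   # True: all-zero rows were dropped
print(torch.unique(X))      # tensor([-1., 0., 1.])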
Example #5
    def fit_filter(self,
                   mat,
                   n_iters=1000,
                   lr_nnet=1e-4,
                   seed=42,
                   mat_is_cov=False,
                   round_adj=False):
        """
        Fits filter on graph topology. 

        mat (np.ndarray):
            Either the signal to fit where columns correspond to graph nodes
            (if mat_is_cov set to False) or an empirical covariance matrix 
            (if mat_is_cov set to True)
        n_iters (int):
            Number of epochs for fitting
        lr_nnet (float):
            Learning rate for neural network
        mat_is_cov (bool):
            Specifies whether input mat is covariance matrix or observed signals.
        round_adj (bool):
            Whether to round the adjacency matrix with a threshold of 0.5 before 
            fitting the filter.
        """
        _seed(seed)
        self._h_of_L = None  # forget about previously computed h(L)

        if mat_is_cov:
            cov = mat
        else:
            cov = np.cov(mat.T)
        sqrt_cov = symsqrt(to_torch(cov))
        evals, evecs = self.get_L_decomp(round=round_adj)

        self.optim_h = torch.optim.Adam(self._h.parameters(), lr=lr_nnet)

        _start_model_tracking(self._h)
        for i in range(int(n_iters)):
            self._optim_h_step(sqrt_cov, evals, evecs)
        _stop_model_tracking(self._h)
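A hedged usage sketch; the enclosing class is not shown in these excerpts, so model below stands for an already-constructed instance of it:

# Hypothetical usage; model is an instance of the (unshown) enclosing class.
X = gen_white_noise(500, 10, random_state=0).numpy()  # columns = graph nodes
model.fit_filter(X, n_iters=500, lr_nnet=1e-4)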
Example #6
def _check_symmetric(mat, name='Matrix', tol=1e-5):
    """Raise if mat deviates from symmetry by more than tol."""
    mat = to_torch(mat)
    if torch.any(torch.abs(mat - mat.T) > tol):
        raise ValueError(name + ' is not symmetric.')
Example #7
    def w(self, value):
        """Set the edge weight vector and derive the adjacency matrix and Laplacian from it."""
        self._w = to_torch(value)
        self._A = w_to_A(value)
        self._L = A_to_L(self._A)
Example #8
    def L(self, value):
        """Set the Laplacian after validation and derive the adjacency matrix and weights from it."""
        _check_valid_laplacian(value)
        self._L = to_torch(value)
        self._A = L_to_A(value)
        self._w = A_to_w(self._A)
Example #9
    def fit_graph(self,
                  mat,
                  lr_L=1e-2,
                  lr_h=1e-3,
                  nit=3000,
                  nit_h_per_iter=3,
                  learn_h=True,
                  mat_is_cov=False,
                  fine_tune=False,
                  seed=23,
                  verbose=100,
                  optim_h='gd',
                  optim_L='adam'):
        """
        Fits graph and filter to input signals.
        
        mat (np.ndarray):
            Either the signal to fit where columns correspond to graph nodes
            (if input_is_cov set to False) or an empirical covariance matrix of
            the signal (if input_is_cov set to True)        
        lr_L (float):
            Learning rate for the graph Laplacian update
        lr_h (float):
            Learning rate for the filter update
        nit (int):
            Number of total iterations
        nit_h_per_iter (int):
            Number of updates to the filter per Laplacian update. 
        learn_h (bool):
            Whether to learn the filter h. Set this to False if the object
            was created with a custom filter function h.
        mat_is_cov (bool):
            Specifies whether mat is covariance matrix or signal matrix
        fine_tune (bool):
            Whether to continue optimization for further fine-tuning the model. In
            this case no new optimizers are created.
        seed (int):
            Random seed
        verbose (int):
            Interval after which progress is printed. Set to zero if no output wanted.
        optim_h (str):
            Either 'gd' or 'adam'. Specifies method to use for optimization of the 
            neural network. Gradient descent yields to better results but slightly slows down 
            convergence. If adam is used, we recommend a learning rate of 1e-4. 
            Default is 'gd'.
        optim_L (str):
            Either 'gd' or 'adam'. 
        """

        _seed(seed)
        self._h_of_L = None  # forget about previously computed h(L)

        _, d = mat.shape
        if self._w is None:  # if no w provided, choose one at random
            self.w = torch.rand(size=(1, (d * (d - 1) // 2)),
                                dtype=torch.float64) - 0.5

        if mat_is_cov:
            cov = to_torch(mat)
        else:
            cov = to_torch(np.cov(mat.T))

        target = symsqrt(cov)

        self._create_optimizers(fine_tune, lr_L, lr_h, optim_L, optim_h)

        for it in range(int(nit)):
            if learn_h:
                _start_model_tracking(self._h)
                evals, evecs = self.get_L_decomp(ignore=0)
                for _ in range(int(nit_h_per_iter)):
                    self._optim_h_step(target, evals, evecs)
                _stop_model_tracking(self._h)

            cost = self._optim_L_step(target)
            self.loss_hist.append(cost)

            _be_verbose(it, nit, verbose, cost)
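A hedged usage sketch along the same lines; model again stands for an instance of the enclosing class:

# Hypothetical usage: learn graph and filter jointly from raw signals.
X = gen_white_noise(500, 10, random_state=0).numpy()
model.fit_graph(X, nit=3000, verbose=500)
A_hat = model.A  # learned adjacency, exposed via the A property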
Example #10
    def A(self, value):
        """Set the adjacency matrix after validation and derive the Laplacian and weights from it."""
        _check_valid_adjacency(value)
        self._A = to_torch(value)
        self._L = A_to_L(value)
        self._w = A_to_w(self._A)
Example #11
def impute_graph(y,
                 lr=.01,
                 lr_nnet=1e-3,
                 nit_nnet=3,
                 start=None,
                 h_start=None,
                 n_epochs=3000,
                 random_seed=23,
                 verbose=100,
                 learn_h=True,
                 stochastic=False,
                 batch_size=32):
    """
    Impute graph by alterating between fitting neural network
    and Laplacian.
    
    y:
        Data matrix (observations x features)
    lr:
        Learning rate for Laplacian update
    lr_nnet:
        Learning rate for neural network update
    nit_nnet:
        Number of neural network updates per Laplacian update
    start:
        Initialization for adjacency matrix
    h_start:
        Initialization for neural network. If learn_h is set to true,
        must be torch neural network
    n_epochs:
        Number of Laplacian updates
    random_seed:
        For Laplacian initialization
    verbose:
        Number of updates after which to print status
    learn_h:
        Specifies whether to learn h jointly with L. If set to false, 
        h_start must be given.

    """
    _, d = y.shape

    C = to_torch(np.cov(y.T))
    target = symsqrt(C)

    history = pd.DataFrame(columns=[
        'Loss', 'Nb_Sign_Switch', 'Nb_Zero', 'Nb_One', 'Mean_Step',
        'Median_Step', 'Vals_sum'
    ],
                           index=range(n_epochs))

    torch.manual_seed(random_seed)
    np.random.seed(random_seed)

    best_cost = float('inf')
    best_vals = None
    best_h = None

    if start is None:
        vals = torch.rand(size=(1,
                                (d * (d - 1) // 2)), dtype=torch.float64) - 0.5
    else:
        start = to_torch(start)
        vals = start[torch.triu(torch.ones(d, d), 1) == 1].unsqueeze_(0)

    # Initialize h
    if h_start is None:
        if not learn_h:
            raise ValueError(
                'h_start must be given if learn_h is set to False')
        h = NNet()
    else:
        h = h_start

    nnet_optimizer = torch.optim.SGD(h.parameters(), lr=lr_nnet)
    l_optimizer = torch.optim.Adam([vals], lr=lr)

    for epoch in range(n_epochs):
        if stochastic:
            touse = np.random.choice(y.shape[0], batch_size, replace=False)
            C_stoch = np.cov(y[touse].T)
            target = symsqrt(to_torch(C_stoch))

        if learn_h:
            vals.requires_grad = False
            L = vals_to_L(torch.sigmoid(vals))

            h = start_model_tracking(h)
            h = fit_filter(L, target, h, nnet_optimizer, n_iters=nit_nnet)
            h = stop_model_tracking(h)

        vals.requires_grad = True
        l_optimizer.zero_grad()
        L = vals_to_L(torch.sigmoid(vals))

        filtered_L = filter_matrix(L, h)
        cost = ((filtered_L - target)**2).sum()

        if best_cost > cost.item():
            best_cost = cost.item()
            best_vals = torch.sigmoid(vals)
            best_h = deepcopy(h)

        try:
            cost.backward()
        except RuntimeError as e:
            print(e)
            return vals_to_A(torch.sigmoid(vals)).detach().numpy(), history, h

        # Update
        l_optimizer.step()

        # History
        history.loc[epoch] = {
            'Loss': cost.item(),
            'Nb_Zero': (vals <= 0).sum().item(),
            'Nb_One': (vals >= 1).sum().item(),
            'Vals_sum': vals.sum().item()
        }

        if verbose and (epoch == 0 or
                        (epoch + 1) % verbose == 0 or epoch + 1 == n_epochs):
            print('\r[Epoch %4d/%d] loss: %f' %
                  (epoch + 1, n_epochs, cost.item()),
                  end='')

    return vals_to_A(best_vals).detach().numpy(), history, best_h
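A usage sketch under the assumption that y is an (observations x features) NumPy array:

import numpy as np

# Hypothetical usage on white-noise data.
y = np.random.randn(500, 10)
A_hat, history, h_hat = impute_graph(y, n_epochs=1000, verbose=100)
print(history['Loss'].dropna().min())  # best loss over the run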
Example #12
def project_onto_leq_one(vals):
    """Projects vals onto the set of values less than or equal to one."""
    large_vals = vals.clone().detach().numpy()
    large_vals[large_vals <= 1] = 1
    return vals - to_torch(large_vals - 1)
Example #13
def project_onto_pos(vals):
    """Projects vals onto the set of non-negative values."""
    neg_vals = vals.clone().detach().numpy()
    neg_vals[neg_vals > 0] = 0
    return vals.float() - to_torch(neg_vals)
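A small numeric check of both projections, assuming to_torch wraps NumPy arrays as tensors:

import torch

v = torch.tensor([-0.3, 0.4, 1.7], dtype=torch.float64)
print(project_onto_pos(v))      # approx. tensor([0.0, 0.4, 1.7])
print(project_onto_leq_one(v))  # approx. tensor([-0.3, 0.4, 1.0])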