Example 1
import numpy as np
import matplotlib.pyplot as plt

# context_manager is assumed to be the project's plotting helper.
def _plot_condition_number(matrix):
    """Expects a stack of matrices with shape (F, K, K), so that
    np.linalg.cond yields one condition number per frequency bin."""
    with context_manager(figure_size=(24, 3)):
        plt.plot(np.log(np.linalg.cond(matrix)))
        plt.xlabel('frequency bin')
        plt.ylabel('log cond A')
        plt.show()
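
A hypothetical call, assuming a stack of per-frequency matrices (the shape
below is an arbitrary choice):

import numpy as np

# One 3x3 matrix per frequency bin; np.linalg.cond broadcasts over the
# leading axis with a recent NumPy, yielding 257 condition numbers.
A = np.random.normal(size=(257, 3, 3)) + 1j * np.random.normal(size=(257, 3, 3))
_plot_condition_number(A)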
Example 2
import matplotlib.pyplot as plt

# context_manager, facet_grid, and plot are assumed to be the project's
# visualization helpers.
def _plot_affiliations(*affiliation_list):
    """Each argument must have shape (F, K, T)."""
    with context_manager():
        facet_grid([x[:, 0, :].T for x in affiliation_list],
                   plot.mask,
                   colwrap=max(2, len(affiliation_list)))
        plt.show()
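
A hypothetical call, with Dirichlet-distributed soft masks standing in for
real affiliations of shape (F, K, T):

import numpy as np

# (257, 100, 3) Dirichlet samples, transposed to (F, K, T) = (257, 3, 100).
a = np.random.dirichlet(np.ones(3), size=(257, 100)).transpose(0, 2, 1)
b = np.random.dirichlet(np.ones(3), size=(257, 100)).transpose(0, 2, 1)
_plot_affiliations(a, b)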
Example 3
    def fit(
            self, Y, embedding, initialization, iterations=100,
            min_concentration_vmf=0, max_concentration_vmf=500,
            hermitize=True, trace_norm=True, eigenvalue_floor=1e-10,
            inverse='inv'
    ):
        """

        Args:
            Y: Mix with shape (F, D, T).
            embedding: Embedding from Deep Clustering with shape (F*T, E).
            initialization: Shape (F, K, T)
            iterations: Most of the time 10 iterations are acceptable.
            min_concentration_vmf: For numerical stability reasons.
            max_concentration_vmf: For numerical stability reasons.

        Returns:
        """
        D = Y.shape[-1]
        Y_for_psd = np.copy(np.swapaxes(Y, -2, -1), 'C')
        Y_for_pdf = np.copy(Y, 'C')
        embedding = np.copy(np.swapaxes(embedding, -2, -1), 'C')

        # F, K, T = initialization.shape[-3:]
        affiliations = np.copy(initialization)
        power = np.ones_like(affiliations)

        for i in range(iterations):
            # E step
            if i > 0:
                affiliations, power = self._predict(Y_for_pdf, embedding.T,
                                                    inverse=inverse)

            # M step
            self.pi = affiliations.mean(axis=-1)

            self.covariance = get_power_spectral_density_matrix(
                Y_for_psd,
                np.copy(np.clip(affiliations, self.eps, 1 - self.eps) / power,
                        'C'),
                sensor_dim=-2, source_dim=-2, time_dim=-1
            )

            if hermitize:
                self.covariance = (
                    self.covariance
                    + np.swapaxes(self.covariance.conj(), -1, -2)
                ) / 2

            if trace_norm:
                self.covariance /= np.einsum(
                    '...dd', self.covariance
                )[..., None, None]

            # Deconstructs covariance matrix and constrains eigenvalues
            eigenvals, eigenvecs = np.linalg.eigh(self.covariance)
            eigenvals = eigenvals.real
            eigenvals = np.maximum(
                eigenvals,
                np.max(eigenvals, axis=-1, keepdims=True) * eigenvalue_floor
            )
            diagonal = np.einsum('de,fkd->fkde', np.eye(D), eigenvals)
            self.covariance = np.einsum(
                'fkwx,fkxy,fkzy->fkwz', eigenvecs, diagonal, eigenvecs.conj()
            )
            self.determinant = np.prod(eigenvals, axis=-1)
            inverse_diagonal = np.einsum('de,fkd->fkde', np.eye(D),
                                         1 / eigenvals)
            self.precision = np.einsum(
                'fkwx,fkxy,fkzy->fkwz', eigenvecs, inverse_diagonal,
                eigenvecs.conj()
            )

            if self.visual_debug:
                with context_manager(figure_size=(24, 3)):
                    plt.plot(np.log10(
                        np.max(eigenvals, axis=-1)
                        / np.min(eigenvals, axis=-1)
                    ))
                    plt.xlabel('frequency bin')
                    plt.ylabel('eigenvalue spread')
                    plt.show()

            self.mu, self.kappa_vmf = VonMisesFisher.fit(
                embedding.T,
                np.clip(reshape(affiliations, 'fkt->k,t*f'), self.eps,
                        1 - self.eps),
                min_concentration=min_concentration_vmf,
                max_concentration=max_concentration_vmf
            )
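
The eigendecomposition block above recurs verbatim in every fit in this
listing. A self-contained sketch of the same computation (the function name
is ours, not from the source):

import numpy as np

def floor_eigenvalues(covariance, eigenvalue_floor=1e-10):
    """Floors the eigenvalues of a Hermitian stack with shape (F, K, D, D)
    and returns the reconstructed covariance, its determinant, and its
    inverse (precision)."""
    D = covariance.shape[-1]
    eigenvals, eigenvecs = np.linalg.eigh(covariance)
    # Clip every eigenvalue to a fraction of the largest one per matrix.
    eigenvals = np.maximum(
        eigenvals.real,
        np.max(eigenvals.real, axis=-1, keepdims=True) * eigenvalue_floor
    )
    diagonal = np.einsum('de,fkd->fkde', np.eye(D), eigenvals)
    inverse_diagonal = np.einsum('de,fkd->fkde', np.eye(D), 1 / eigenvals)
    # Recompose V diag(lambda) V^H and V diag(1/lambda) V^H.
    covariance = np.einsum(
        'fkwx,fkxy,fkzy->fkwz', eigenvecs, diagonal, eigenvecs.conj()
    )
    precision = np.einsum(
        'fkwx,fkxy,fkzy->fkwz', eigenvecs, inverse_diagonal, eigenvecs.conj()
    )
    determinant = np.prod(eigenvals, axis=-1)
    return covariance, determinant, precision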
Example 4
    def fit(
            self, Y, initialization, iterations=100,
            hermitize=True, trace_norm=True, eigenvalue_floor=1e-10
    ):
        """Fit a cACGMM.

        Args:
            Y: Normalized observations with shape (..., T, D).
            iterations:
            initialization: Shape (..., K, T).
        """
        F, T, D = Y.shape
        K = initialization.shape[-2]
        Y_for_psd = np.copy(np.swapaxes(Y, -2, -1), 'C')[..., None, :, :]
        Y_for_pdf = np.copy(Y, 'C')

        affiliations = np.copy(initialization)
        quadratic_form = np.ones_like(affiliations)

        for i in range(iterations):
            # E step
            if i > 0:
                # Equation 12
                affiliations, quadratic_form = self._predict(Y_for_pdf)

            self.pi = np.mean(affiliations, axis=-1)
            assert self.pi.shape == (F, K), self.pi.shape

            mask = affiliations[..., None, :]
            assert mask.shape == (F, K, 1, T), mask.shape
            self.covariance = D * np.einsum(
                '...dt,...et->...de',
                (mask / quadratic_form[..., None, :]) * Y_for_psd,
                Y_for_psd.conj()
            )
            normalization = np.sum(mask, axis=-1, keepdims=True)
            self.covariance /= normalization
            assert self.covariance.shape == (F, K, D, D), self.covariance.shape

            if hermitize:
                self.covariance = (
                    self.covariance
                    + np.swapaxes(self.covariance.conj(), -1, -2)
                ) / 2

            if trace_norm:
                self.covariance /= np.einsum(
                    '...dd', self.covariance
                )[..., None, None]

            eigenvals, eigenvecs = np.linalg.eigh(self.covariance)
            eigenvals = eigenvals.real
            eigenvals = np.maximum(
                eigenvals,
                np.max(eigenvals, axis=-1, keepdims=True) * eigenvalue_floor
            )
            diagonal = np.einsum('de,fkd->fkde', np.eye(D), eigenvals)
            self.covariance = np.einsum(
                'fkwx,fkxy,fkzy->fkwz', eigenvecs, diagonal, eigenvecs.conj()
            )
            self.determinant = np.prod(eigenvals, axis=-1)
            inverse_diagonal = np.einsum('de,fkd->fkde', np.eye(D),
                                         1 / eigenvals)
            self.precision = np.einsum(
                'fkwx,fkxy,fkzy->fkwz', eigenvecs, inverse_diagonal,
                eigenvecs.conj()
            )

            if self.visual_debug:
                with context_manager(figure_size=(24, 3)):
                    plt.plot(np.log10(
                        np.max(eigenvals, axis=-1)
                        / np.min(eigenvals, axis=-1)
                    ))
                    plt.xlabel('frequency bin')
                    plt.ylabel('eigenvalue spread')
                    plt.show()
                with context_manager(figure_size=(24, 3)):
                    plt.plot(self.pi)
                    plt.show()
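
The fit signature above is from the source, but the surrounding class is
not shown. A usage sketch under stated assumptions: CACGMM is a hypothetical
class name with a no-argument constructor, and random data stands in for a
real STFT mixture.

import numpy as np

F, T, D, K = 257, 100, 6, 3
# The cACGMM expects observations normalized along the channel axis D.
Y = np.random.normal(size=(F, T, D)) + 1j * np.random.normal(size=(F, T, D))
Y /= np.linalg.norm(Y, axis=-1, keepdims=True)
# Random soft masks that sum to one over the K sources.
initialization = np.random.uniform(size=(F, K, T))
initialization /= initialization.sum(axis=-2, keepdims=True)

model = CACGMM()  # hypothetical name; only fit's signature is from the source
model.fit(Y, initialization, iterations=10)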
Example 5
    def fit(
            self, Y, initialization, iterations=100,
            hermitize=True, trace_norm=True, eigenvalue_floor=1e-10,
            inverse='inv'
    ):
        """ EM for cGMM with any number of independent dimensions.

        Does not support sequence lengths.
        Can later be extended to accept more initializations.

        Args:
            Y: Mix with shape (..., T, D).
            iterations:
            initialization: Shape (..., K, T).
        """
        Y_for_psd = np.copy(np.swapaxes(Y, -2, -1), 'C')
        Y_for_pdf = np.copy(Y, 'C')
        D = Y_for_pdf.shape[-1]

        affiliations = np.copy(initialization)
        power = np.ones_like(affiliations)

        for i in range(iterations):
            # E step
            if i > 0:
                # Equation 10
                affiliations, power = self._predict(Y_for_pdf)

            # M step
            if self.use_mixture_weights:
                self.pi = np.mean(affiliations, axis=-1)

            # Equation 6
            self.covariance = get_power_spectral_density_matrix(
                Y_for_psd,
                np.copy(np.clip(affiliations, self.eps, 1 - self.eps) / power,
                        'C'),
                sensor_dim=-2, source_dim=-2, time_dim=-1
            )

            if hermitize:
                self.covariance = (
                    self.covariance
                    + np.swapaxes(self.covariance.conj(), -1, -2)
                ) / 2

            if trace_norm:
                self.covariance /= np.einsum(
                    '...dd', self.covariance
                )[..., None, None]

            # Deconstructs the covariance matrix and floors its eigenvalues.
            # Note: these einsum subscripts assume a single leading frequency
            # axis f, despite the "any number of independent dimensions" in
            # the docstring.
            eigenvals, eigenvecs = np.linalg.eigh(self.covariance)
            eigenvals = eigenvals.real
            eigenvals = np.maximum(
                eigenvals,
                np.max(eigenvals, axis=-1, keepdims=True) * eigenvalue_floor
            )
            diagonal = np.einsum('de,fkd->fkde', np.eye(D), eigenvals)
            self.covariance = np.einsum(
                'fkwx,fkxy,fkzy->fkwz', eigenvecs, diagonal, eigenvecs.conj()
            )
            self.determinant = np.prod(eigenvals, axis=-1)
            inverse_diagonal = np.einsum('de,fkd->fkde', np.eye(D),
                                         1 / eigenvals)
            self.precision = np.einsum(
                'fkwx,fkxy,fkzy->fkwz', eigenvecs, inverse_diagonal,
                eigenvecs.conj()
            )

            if self.visual_debug:
                with context_manager(figure_size=(24, 3)):
                    plt.plot(np.log10(
                        np.max(eigenvals, axis=-1)
                        / np.min(eigenvals, axis=-1)
                    ))
                    plt.xlabel('frequency bin')
                    plt.ylabel('eigenvalue spread')
                    plt.show()
                with context_manager(figure_size=(24, 3)):
                    plt.plot(self.pi)
                    plt.show()
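
get_power_spectral_density_matrix is a project helper; assuming it forms a
mask-weighted outer product over time, normalized by the mask sum (which is
consistent with the keyword arguments above), an equivalent plain-einsum
sketch is:

import numpy as np

def weighted_psd(Y_for_psd, mask):
    """Assumed behavior of get_power_spectral_density_matrix with
    sensor_dim=-2, source_dim=-2, time_dim=-1.

    Y_for_psd: (..., D, T), mask: (..., K, T); returns (..., K, D, D)."""
    psd = np.einsum(
        '...kt,...dt,...et->...kde', mask, Y_for_psd, Y_for_psd.conj()
    )
    return psd / mask.sum(axis=-1)[..., None, None]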
Example 6
    def fit(
            self, Y, embedding, initialization, iterations=100,
            min_concentration_vmf=0, max_concentration_vmf=500,
            eigenvalue_floor=1e-10
    ):
        """Fit a vMFcACGMM.

        Args:
            Y: Mix with shape (F, D, T).
            embedding: Embedding from Deep Clustering with shape (F*T, E).
            initialization: Shape (F, K, T)
            iterations: Most of the time 10 iterations are acceptable.
            min_concentration_vmf: For numerical stability reasons.
            max_concentration_vmf: For numerical stability reasons.

        Returns:
        """
        F, T, D = Y.shape
        Y_for_psd = np.copy(np.swapaxes(Y, -2, -1), 'C')
        Y_for_pdf = np.copy(Y, 'C')
        embedding = np.copy(np.swapaxes(embedding, -2, -1), 'C')

        # K is needed by the shape assertions below.
        F, K, T = initialization.shape[-3:]
        affiliations = np.copy(initialization)
        quadratic_form = np.ones_like(affiliations)

        for i in range(iterations):
            # E step
            if i > 0:
                affiliations, quadratic_form = self._predict(Y_for_pdf,
                                                             embedding)

            # M step
            self.pi = affiliations.mean(axis=-1)
            assert self.pi.shape == (F, K), self.pi.shape

            mask = affiliations[..., None, :]
            assert mask.shape == (F, K, 1, T), mask.shape
            self.covariance = D * np.einsum(
                '...dt,...et->...de',
                (mask / quadratic_form[..., None, :]) * Y_for_psd,
                Y_for_psd.conj()
            )
            normalization = np.sum(mask, axis=-1, keepdims=True)
            self.covariance /= normalization
            assert self.covariance.shape == (F, K, D, D), self.covariance.shape

            # Deconstructs covariance matrix and constrains eigenvalues
            eigenvals, eigenvecs = np.linalg.eigh(self.covariance)
            eigenvals = eigenvals.real
            eigenvals = np.maximum(
                eigenvals,
                np.max(eigenvals, axis=-1, keepdims=True) * eigenvalue_floor
            )
            diagonal = np.einsum('de,fkd->fkde', np.eye(D), eigenvals)
            self.covariance = np.einsum(
                'fkwx,fkxy,fkzy->fkwz', eigenvecs, diagonal, eigenvecs.conj()
            )
            self.determinant = np.prod(eigenvals, axis=-1)
            inverse_diagonal = np.einsum('de,fkd->fkde', np.eye(D),
                                         1 / eigenvals)
            self.precision = np.einsum(
                'fkwx,fkxy,fkzy->fkwz', eigenvecs, inverse_diagonal,
                eigenvecs.conj()
            )

            if self.visual_debug:
                with context_manager(figure_size=(24, 3)):
                    plt.plot(np.log10(
                        np.max(eigenvals, axis=-1)
                        / np.min(eigenvals, axis=-1)
                    ))
                    plt.xlabel('frequency bin')
                    plt.ylabel('eigenvalue spread')
                    plt.show()

            self.mu, self.kappa_vmf = VonMisesFisher.fit(
                embedding.T,
                np.clip(reshape(affiliations, 'fkt->k,t*f'), self.eps,
                        1 - self.eps),
                min_concentration=min_concentration_vmf,
                max_concentration=max_concentration_vmf
            )
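
reshape with the pattern 'fkt->k,t*f' is a project helper; assuming the
pattern permutes the axes and then merges t and f, a plain-NumPy equivalent
would be:

import numpy as np

def reshape_fkt_to_k_tf(affiliations):
    """Assumed equivalent of reshape(affiliations, 'fkt->k,t*f'):
    move k to the front, then merge t (major) and f (minor)."""
    F, K, T = affiliations.shape
    return affiliations.transpose(1, 2, 0).reshape(K, T * F)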