Code example #1
    def encodeRelu(self, inNeuron, outNeuron, netPrefix):
        enc = ''
        layerIndex = inNeuron.layer
        rowIndex = inNeuron.row

        delta = Variable(layerIndex, rowIndex, netPrefix, 'd', 'Int')
        delta.setLo(0)
        delta.setHi(1)

        # later use bound on sn for m
        m = 99999
        dm = self.makeMult(ffp(m), delta.name)

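        # Big-M encoding of out = max(in, 0) with the binary indicator delta:
        # delta = 1 marks the active phase (in >= 0 and out = in),
        # delta = 0 marks the inactive phase (in <= 0 and out = 0).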
        enc += self.makeGeq(outNeuron.name, '0')
        enc += '\n' + self.makeGeq(outNeuron.name, inNeuron.name)
        enc += '\n' + self.makeLeq(
            self.makeSum([inNeuron.name, self.makeNeg(dm)]), '0')
        enc += '\n' + self.makeGeq(
            self.makeSum([inNeuron.name,
                          ffp(m), self.makeNeg(dm)]), '0')
        enc += '\n' + self.makeLeq(
            outNeuron.name,
            self.makeSum([inNeuron.name,
                          ffp(m), self.makeNeg(dm)]))
        enc += '\n' + self.makeLeq(outNeuron.name, dm)

        return (enc, [delta])
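The Variable objects used throughout these encoder snippets are not shown on this page. A minimal sketch of the interface the code above relies on (constructor arguments, name, type, and the setLo/setHi bound setters) could look like the following; the naming scheme and the default 'Real' type are assumptions, not the project's actual implementation.

class Variable:
    """Sketch of the variable record assumed by the encoder methods above."""

    def __init__(self, layer, row, netPrefix, prefix, varType='Real'):
        self.layer = layer
        self.row = row
        self.type = varType
        # Assumed naming scheme; the real project may compose names differently.
        self.name = '{}_{}_{}_{}'.format(netPrefix, prefix, layer, row)
        self.hasLo = False
        self.hasHi = False
        self.lo = None
        self.hi = None

    def setLo(self, lo):
        self.lo = lo
        self.hasLo = True

    def setHi(self, hi):
        self.hi = hi
        self.hasHi = True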
Code example #2
    def makePreambleReadable(self):
        preamble = '(set-option :produce-models true)\n(set-logic AUFLIRA)'
        decls = ''
        bounds = ''
        for varList in self.vars:
            for var in varList:
                decls += '\n(declare-const ' + var.name + ' ' + var.type + ')'
                if var.hasHi:
                    bounds += '\n' + self.makeLeq(var.name, ffp(var.hi))
                if var.hasLo:
                    bounds += '\n' + self.makeGeq(var.name, ffp(var.lo))

        return preamble + decls + '\n; ---- Bounds ----' + bounds
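The string helpers called throughout these examples (makeEq, makeGeq, makeLeq, makeGt, makeLt, makeSum, makeMult, makeNeg) and the number formatter ffp are likewise not shown here. The sketch below is an assumption about what they emit (plain SMT-LIB s-expressions); in the project they are methods on the encoder class, written here as module-level functions to keep the sketch self-contained.

def ffp(x):
    # Assumed to render a number as an SMT-LIB literal; negatives use the unary minus form.
    return str(float(x)) if x >= 0 else '(- {})'.format(abs(float(x)))

def makeEq(lhs, rhs):
    return '(assert (= {} {}))'.format(lhs, rhs)

def makeGeq(lhs, rhs):
    return '(assert (>= {} {}))'.format(lhs, rhs)

def makeLeq(lhs, rhs):
    return '(assert (<= {} {}))'.format(lhs, rhs)

def makeGt(lhs, rhs):
    return '(assert (> {} {}))'.format(lhs, rhs)

def makeLt(lhs, rhs):
    return '(assert (< {} {}))'.format(lhs, rhs)

def makeSum(terms):
    return '(+ {})'.format(' '.join(terms))

def makeMult(a, b):
    return '(* {} {})'.format(a, b)

def makeNeg(term):
    return '(- {})'.format(term)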
Code example #3
    def encodeMaxPoolReadable(self, inNeurons, outNeuron, netPrefix):
        num = len(inNeurons)
        enc = ''
        vars = []

        # TODO: think of other ways to name the vars uniquely (too long names)
        if num == 1:
            enc = self.makeEq(inNeurons[0].name, outNeuron.name)
            return (enc, vars)

        if num == 2:
            maxVarA = inNeurons[0]
            maxVarB = inNeurons[1]

        if num > 2:
            maxVarA = Variable(outNeuron.layer, outNeuron.row, netPrefix,
                               outNeuron.name + 'a')
            maxVarB = Variable(outNeuron.layer, outNeuron.row, netPrefix,
                               outNeuron.name + 'b')
            enc1, vars1 = self.encodeMaxPoolReadable(inNeurons[:num // 2],
                                                     maxVarA, netPrefix)
            enc2, vars2 = self.encodeMaxPoolReadable(inNeurons[num // 2:],
                                                     maxVarB, netPrefix)

            enc += enc1 + '\n' + enc2
            # keep the helper variables from both halves as a flat list
            vars.extend(vars1)
            vars.extend(vars2)

        delta = Variable(outNeuron.layer, outNeuron.row, netPrefix,
                         outNeuron.name + 'd', 'Int')
        delta.setLo(0)
        delta.setHi(1)
        vars.append(delta)

        m = 99999

        md = self.makeMult(ffp(m), delta.name)
        enc += '\n' + self.makeGeq(outNeuron.name, maxVarA.name)
        enc += '\n' + self.makeGeq(outNeuron.name, maxVarB.name)
        enc += '\n' + self.makeLeq(outNeuron.name,
                                   self.makeSum([maxVarA.name, md]))
        enc += '\n' + self.makeLeq(
            outNeuron.name,
            self.makeSum([maxVarB.name, ffp(m),
                          self.makeNeg(md)]))

        return (enc, vars)
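The last four constraints are the standard big-M encoding of outNeuron = max(maxVarA, maxVarB): the binary delta selects which operand is binding. A purely illustrative check with concrete numbers (m taken from the snippet above):

def satisfiesMaxEncoding(a, b, out, delta, m=99999):
    """Evaluate the four big-M max constraints for concrete values."""
    return (out >= a and out >= b
            and out <= a + m * delta             # delta = 0 pins out to a
            and out <= b + m * (1 - delta))      # delta = 1 pins out to b

assert satisfiesMaxEncoding(a=2.0, b=5.0, out=5.0, delta=1)      # out = max(a, b) is feasible
assert not satisfiesMaxEncoding(a=2.0, b=5.0, out=2.0, delta=0)  # anything below the max is not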
Code example #4
    def encodeLinearLayer(self, weights, numNeurons, layerIndex, netPrefix):
        enc = '; --- linear constraints layer ' + str(layerIndex) + ' ---'
        prevNeurons = self.vars[-1]
        prevNum = len(prevNeurons)
        currentNeurons = []
        for i in range(0, numNeurons):
            var = Variable(layerIndex, i, netPrefix, 'x')
            currentNeurons.append(var)
            terms = [
                self.makeMult(ffp(weights[row][i]), prevNeurons[row].name)
                for row in range(0, prevNum)
            ]
            terms.append(ffp(weights[-1][i]))
            enc += '\n' + self.makeEq(var.name, self.makeSum(terms))

        self.vars.append(currentNeurons)

        return enc
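encodeLinearLayer expects weights to have one row per previous-layer neuron plus a final bias row, i.e. shape (prevNum + 1, numNeurons), with weights[row][i] feeding previous neuron row into current neuron i. For reference, the same affine map in plain Python (illustrative helper, not part of the project):

def affineLayer(prevValues, weights):
    """Plain-Python view of the affine map encoded by encodeLinearLayer."""
    numNeurons = len(weights[0])
    prevNum = len(prevValues)
    return [
        sum(weights[row][i] * prevValues[row] for row in range(prevNum))
        + weights[-1][i]                         # bias sits in the last weight row
        for i in range(numNeurons)
    ]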
Code example #5
    def encodeOneHotLayerReadable(self, layerIndex, netPrefix=''):
        inNeurons = self.vars[-1]
        maxNeuron = Variable(layerIndex, 0, netPrefix, 'max')
        maxEnc, maxVars = self.encodeMaxPoolReadable(inNeurons, maxNeuron,
                                                     netPrefix)

        outNeurons = []
        diffNeurons = []
        diffConstraints = ''
        enc = ''
        for i in range(0, len(inNeurons)):
            out = Variable(layerIndex + 1, i, netPrefix, 'o', 'Int')
            out.setLo(0)
            out.setHi(1)
            outNeurons.append(out)

            inNeuron = inNeurons[i]

            diff = Variable(layerIndex + 1, i, netPrefix, 'x')
            diffNeurons.append(diff)
            diffConstraints += '\n' + self.makeEq(
                diff.name,
                self.makeSum([inNeuron.name,
                              self.makeNeg(maxNeuron.name)]))

            enc += '\n' + self.makeGt(self.makeMult(ffp(diff.hi), out.name),
                                      diff.name)
            lowerBoundTerm = self.makeSum([
                str(diff.lo),
                self.makeNeg(self.makeMult(ffp(diff.lo), out.name))
            ])
            enc += '\n' + self.makeGeq(diff.name, lowerBoundTerm)

        self.vars.append(maxVars)
        self.vars.append([maxNeuron])
        self.vars.append(diffNeurons)
        self.vars.append(outNeurons)

        return '; --- one hot layer constraints ---' + maxEnc + diffConstraints + enc
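The two inequalities per output turn out_i into an indicator for "inNeuron i attains the maximum": with diff = in_i - max (never positive) and bounds diff.hi > 0 > diff.lo, they force out_i = 1 exactly when diff = 0. Note that the snippet never calls setLo/setHi on diff, so those bounds presumably come from elsewhere in the project. A brute-force illustration with assumed bounds:

# For each candidate out value in {0, 1}, which one satisfies both constraints?
hi, lo = 99999.0, -99999.0
for diff in (0.0, -2.5):                         # diff = in_i - max is never positive
    feasible = [o for o in (0, 1) if hi * o > diff and diff >= lo * (1 - o)]
    print(diff, feasible)                        # 0.0 -> [1], -2.5 -> [0]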
Code example #6
File: correlation.py Project: AnnaTruzzi/pingouin
def rcorr(self,
          method='pearson',
          upper='pval',
          decimals=3,
          padjust=None,
          stars=True,
          pval_stars={
              0.001: '***',
              0.01: '**',
              0.05: '*'
          }):
    """
    Correlation matrix of a dataframe with p-values and/or sample size on the
    upper triangle (:py:class:`pandas.DataFrame` method).

    This method is a faster, but less exhaustive, matrix-version of the
    :py:func:`pingouin.pairwise_corr` function. It is based on the
    :py:func:`pandas.DataFrame.corr` method. Missing values are automatically
    removed from each pairwise correlation.

    Parameters
    ----------
    self : :py:class:`pandas.DataFrame`
        Input dataframe.
    method : str
        Correlation method. Can be either 'pearson' or 'spearman'.
    upper : str
        If 'pval', the upper triangle of the output correlation matrix shows
        the p-values. If 'n', the upper triangle is the sample size used in
        each pairwise correlation.
    decimals : int
        Number of decimals to display in the output correlation matrix.
    padjust : string or None
        Method used for adjustment of pvalues.
        Available methods are ::

        'none' : no correction
        'bonf' : one-step Bonferroni correction
        'sidak' : one-step Sidak correction
        'holm' : step-down method using Bonferroni adjustments
        'fdr_bh' : Benjamini/Hochberg FDR correction
        'fdr_by' : Benjamini/Yekutieli FDR correction
    stars : boolean
        If True, only significant p-values are displayed as stars using the
        pre-defined thresholds of ``pval_stars``. If False, all the raw
        p-values are displayed.
    pval_stars : dict
        Significance thresholds. Default is 3 stars for p-values < 0.001,
        2 stars for p-values < 0.01 and 1 star for p-values < 0.05.

    Returns
    -------
    rcorr : :py:class:`pandas.DataFrame`
        Correlation matrix, of type str.

    Examples
    --------
    >>> import numpy as np
    >>> import pandas as pd
    >>> import pingouin as pg
    >>> # Load an example dataset of personality dimensions
    >>> df = pg.read_dataset('pairwise_corr').iloc[:, 1:]
    >>> # Add some missing values
    >>> df.iloc[[2, 5, 20], 2] = np.nan
    >>> df.iloc[[1, 4, 10], 3] = np.nan
    >>> df.head().round(2)
       Neuroticism  Extraversion  Openness  Agreeableness  Conscientiousness
    0         2.48          4.21      3.94           3.96               3.46
    1         2.60          3.19      3.96            NaN               3.23
    2         2.81          2.90       NaN           2.75               3.50
    3         2.90          3.56      3.52           3.17               2.79
    4         3.02          3.33      4.02            NaN               2.85

    >>> # Correlation matrix on the four first columns
    >>> df.iloc[:, 0:4].rcorr()
                  Neuroticism Extraversion Openness Agreeableness
    Neuroticism             -          ***                     **
    Extraversion        -0.35            -      ***
    Openness            -0.01        0.265        -           ***
    Agreeableness      -0.134        0.054    0.161             -

    >>> # Spearman correlation and Holm adjustment for multiple comparisons
    >>> df.iloc[:, 0:4].rcorr(method='spearman', padjust='holm')
                  Neuroticism Extraversion Openness Agreeableness
    Neuroticism             -          ***                     **
    Extraversion       -0.325            -      ***
    Openness           -0.027         0.24        -           ***
    Agreeableness       -0.15         0.06    0.173             -

    >>> # Compare with the pg.pairwise_corr function
    >>> pairwise = df.iloc[:, 0:4].pairwise_corr(method='spearman',
    ...                                          padjust='holm')
    >>> pairwise[['X', 'Y', 'r', 'p-corr']].round(3)  # Do not show all columns
                  X              Y      r  p-corr
    0   Neuroticism   Extraversion -0.325   0.000
    1   Neuroticism       Openness -0.027   0.543
    2   Neuroticism  Agreeableness -0.150   0.002
    3  Extraversion       Openness  0.240   0.000
    4  Extraversion  Agreeableness  0.060   0.358
    5      Openness  Agreeableness  0.173   0.000

    >>> # Display the raw p-values with four decimals
    >>> df.iloc[:, [0, 1, 3]].rcorr(stars=False, decimals=4)
                  Neuroticism Extraversion Agreeableness
    Neuroticism             -       0.0000        0.0028
    Extraversion      -0.3501            -        0.2305
    Agreeableness      -0.134       0.0539             -

    >>> # With the sample size on the upper triangle instead of the p-values
    >>> df.iloc[:, [0, 1, 2]].rcorr(upper='n')
                 Neuroticism Extraversion Openness
    Neuroticism            -          500      497
    Extraversion       -0.35            -      497
    Openness           -0.01        0.265        -
    """
    from numpy import triu_indices_from as tif
    from numpy import format_float_positional as ffp
    from scipy.stats import pearsonr, spearmanr
    # np and multicomp are used below but not imported in this excerpt;
    # they are available at module level in the original pingouin source.
    import numpy as np
    from pingouin import multicomp

    # Safety check
    assert isinstance(pval_stars, dict), 'pval_stars must be a dictionary.'
    assert isinstance(decimals, int), 'decimals must be an int.'
    assert method in ['pearson', 'spearman'], 'Method is not recognized.'
    assert upper in ['pval', 'n'], 'upper must be either `pval` or `n`.'
    mat = self.corr(method=method).round(decimals)
    if upper == 'n':
        mat_upper = self.corr(method=lambda x, y: len(x)).astype(int)
    else:
        if method == 'pearson':
            mat_upper = self.corr(method=lambda x, y: pearsonr(x, y)[1])
        else:
            # Method = 'spearman'
            mat_upper = self.corr(method=lambda x, y: spearmanr(x, y)[1])

        if padjust is not None:
            pvals = mat_upper.values[tif(mat, k=1)]
            mat_upper.values[tif(mat, k=1)] = multicomp(pvals,
                                                        alpha=0.05,
                                                        method=padjust)[1]

    # Convert r to text
    mat = mat.astype(str)
    np.fill_diagonal(mat.values, '-')  # Inplace modification of the diagonal

    if upper == 'pval':

        def replace_pval(x):
            for key, value in pval_stars.items():
                if x < key:
                    return value
            return ''

        if stars:
            # Replace p-values by stars
            mat_upper = mat_upper.applymap(replace_pval)
        else:
            mat_upper = mat_upper.applymap(
                lambda x: ffp(x, precision=decimals))

    # Replace upper triangle by p-values or n
    mat.values[tif(mat, k=1)] = mat_upper.values[tif(mat, k=1)]
    return mat
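In this example ffp is numpy.format_float_positional, used when stars=False to print the raw p-values with a fixed number of fractional digits instead of scientific notation. An illustrative call:

from numpy import format_float_positional as ffp

# Cap the shortest-unique decimal representation at three fractional digits.
print(ffp(0.12345, precision=3))   # expected output: 0.123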
Code example #7
    def encodeEquivalence(self,
                          layers1,
                          layers2,
                          input_lowerBounds,
                          input_upperBounds,
                          withOneHot=False):
        self.encodeInputsReadable(input_lowerBounds, input_upperBounds, 'I')
        inputVars = self.vars[-1]

        encNN1 = self.encodeAllLayers(layers1, 'A', withOneHot)
        nn1Outs = self.vars[-1]

        # only need to encode input vars once for both nets,
        # remember position in list, so we can delete duplicate later
        lengthNN1 = len(self.vars)
        self.vars.append(inputVars)

        encNN2 = self.encodeAllLayers(layers2, 'B', withOneHot)
        nn2Outs = self.vars[-1]

        # remove duplicate input vars
        del self.vars[lengthNN1]

        if len(nn1Outs) != len(nn2Outs):
            raise ValueError(
                'only NNs with equal number of outputs can be equivalent')

        eqConstraints = '; --- Equality Constraints --- '
        u = 99999
        l = -99999
        deltas = []
        for out1, out2 in zip(nn1Outs, nn2Outs):
            # out1 - out2 should be 0, if they are equal
            diff = self.makeSum([out1.name, self.makeNeg(out2.name)])

            deltaG0 = Variable(0, out1.row, 'E', 'dG0', 'Int')
            deltaG0.setLo(0)
            deltaG0.setHi(1)
            deltas.append(deltaG0)

            eqConstraints += '\n' + self.makeLeq(
                diff, self.makeMult(ffp(u), deltaG0.name))
            eqConstraints += '\n' + self.makeGt(
                diff,
                self.makeSum([
                    ffp(l),
                    self.makeNeg(self.makeMult(ffp(l), deltaG0.name))
                ]))

            deltaL0 = Variable(0, out1.row, 'E', 'dL0', 'Int')
            deltaL0.setLo(0)
            deltaL0.setHi(1)
            deltas.append(deltaL0)

            eqConstraints += '\n' + self.makeLt(
                diff,
                self.makeSum([
                    ffp(u),
                    self.makeNeg(self.makeMult(ffp(u), deltaL0.name))
                ]))
            eqConstraints += '\n' + self.makeGeq(
                diff, self.makeMult(ffp(l), deltaL0.name))

        # at least one of the not-equals should be true
        eqConstraints += '\n' + self.makeGeq(
            self.makeSum([delta.name for delta in deltas]), ffp(1))

        self.vars.append(deltas)

        preamble = self.makePreambleReadable()
        suffix = self.makeSuffix()

        return preamble + '\n' + encNN1 + '\n' + encNN2 + '\n' + eqConstraints + '\n' + suffix
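Per output pair, the constraints make deltaG0 = 1 force the difference to be strictly positive and deltaL0 = 1 force it to be strictly negative, and the final sum constraint requires at least one indicator to be set; the resulting formula is therefore satisfiable exactly when some admissible input makes the two networks disagree, and an unsat answer means they are equivalent on the given input box. A brute-force illustration of the per-pair indicator logic with concrete numbers (u and l as in the snippet):

# Which (deltaG0, deltaL0) assignments are feasible for a given output difference?
u, l = 99999, -99999
for diff in (0.7, -0.7, 0.0):
    feasible = [(g, d) for g in (0, 1) for d in (0, 1)
                if diff <= u * g and diff > l * (1 - g)
                and diff < u * (1 - d) and diff >= l * d]
    print(diff, feasible)
# 0.7  -> only (1, 0): a positive difference must set deltaG0
# -0.7 -> only (0, 1): a negative difference must set deltaL0
# 0.0  -> only (0, 0): equal outputs cannot set any indicator,
#         so the sum >= 1 constraint rules out models where every pair agrees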