Example 1
File: logreg.py Project: idfah/cebl
    def gradient(self, x, g, returnError=True):
        x = np.asarray(x)
        g = np.asarray(g)

        probs = self.probs(x)

        delta = (probs - g) / probs.size

        # mask that excludes the bias row (last row) from the penalty
        penMask = np.ones_like(self.weights)
        penMask[-1, :] = 0.0

        # data-term gradient plus the elastic-net penalty gradient
        grad = (
            util.bias(x).T.dot(delta) + self.elastic * 2.0 * self.penalty *
            penMask * self.weights / self.weights.size +  # L2-norm penalty
            (1.0 - self.elastic) * self.penalty * penMask *
            np.sign(self.weights) / self.weights.size)  # L1-norm penalty

        gf = grad.ravel()

        if returnError:
            pf = self.weights[:-1, :].ravel()
            err = (
                -np.mean(g * np.log(util.capZero(probs))) + self.elastic *
                self.penalty * pf.dot(pf) / pf.size +  # L2-norm penalty
                (1.0 - self.elastic) * self.penalty * np.mean(np.abs(pf))
            )  # L1-norm penalty

            return err, gf
        else:
            return gf
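The penalty terms above can be sanity-checked numerically. Below is a minimal standalone sketch (plain numpy, not the cebl API; the weights and constants are made up) that verifies an elastic-net penalty gradient against central finite differences. Note one normalization choice: the error in the example divides by pf.size (penalized entries only) while the gradient divides by self.weights.size; the sketch uses the penalized count in both places so the check passes.

import numpy as np

penalty, elastic = 0.1, 0.5
rng = np.random.default_rng(0)
weights = rng.normal(size=(4, 3))  # last row plays the role of the bias

# mask that excludes the bias row from the penalty, as above
penMask = np.ones_like(weights)
penMask[-1, :] = 0.0

def penError(w):
    pf = w[:-1, :].ravel()
    return (elastic * penalty * pf.dot(pf) / pf.size +        # L2-norm penalty
            (1.0 - elastic) * penalty * np.mean(np.abs(pf)))  # L1-norm penalty

def penGrad(w):
    nPen = w[:-1, :].size
    return (elastic * 2.0 * penalty * penMask * w / nPen +
            (1.0 - elastic) * penalty * penMask * np.sign(w) / nPen)

# central finite differences agree with the analytic gradient
eps = 1e-6
numGrad = np.empty_like(weights)
for i in np.ndindex(weights.shape):
    wp, wm = weights.copy(), weights.copy()
    wp[i] += eps
    wm[i] -= eps
    numGrad[i] = (penError(wp) - penError(wm)) / (2.0 * eps)

print(np.max(np.abs(numGrad - penGrad(weights))))  # ~1e-10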
Example 2
File: logreg.py Project: idfah/cebl
    def error(self, x, g):
        x = np.asarray(x)
        g = np.asarray(g)

        likes = np.log(util.capZero(self.probs(x)))

        pf = self.weights[:-1, :].ravel()
        return (-np.mean(g * likes) + self.elastic * self.penalty *
                pf.dot(pf) / pf.size +  # L2-norm penalty
                (1.0 - self.elastic) * self.penalty * np.mean(np.abs(pf))
                )  # L1-norm penalty
Example 3
File: stand.py Project: idfah/cebl
    def initZmus(self, x):
        x = np.asarray(x)

        self.shift = np.mean(x, axis=0)
        #self.scale = util.capZero(np.std(x, axis=0))
        self.scale = np.std(x, axis=0)

        # best way to handle this? XXX - idfah
        if np.any(np.isclose(self.scale, 0.0)):
            print(
                'Standardizer Warning: Some dimensions are constant, capping zeros.'
            )
            self.scale = util.capZero(self.scale)
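The zero-variance guard matters whenever a feature is constant: dividing by a zero standard deviation produces NaNs. A self-contained sketch of the same idea, with np.where standing in for util.capZero (whose implementation is not shown here) and constant columns mapped to exactly zero:

import numpy as np

def zmusParams(x):
    """Return (shift, scale) for zero-mean, unit-std standardization."""
    x = np.asarray(x)
    shift = np.mean(x, axis=0)
    scale = np.std(x, axis=0)

    # replace zero scales so constant columns divide safely
    scale = np.where(np.isclose(scale, 0.0), 1.0, scale)
    return shift, scale

x = np.array([[1.0, 5.0], [2.0, 5.0], [3.0, 5.0]])  # second column is constant
shift, scale = zmusParams(x)
print((x - shift) / scale)  # constant column standardizes to all zeros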
Example 4
File: logreg.py Project: idfah/cebl
    def error(self, x, g):
        """Compute the negative log likelyhood for given inputs and targets.
        Error function for Optable interface.

        Args:
            x:

            g:

        Returns:
            The scalar negative log likelyhood.
        """
        x = np.asarray(x)
        g = np.asarray(g)

        likes = np.log(util.capZero(self.probs(x)))

        return -np.mean(g * likes)
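Capping zeros inside the log is what keeps this error finite when the model assigns probability zero to an observed class. A standalone numpy sketch of the same computation, with np.clip playing the role of util.capZero (an assumption, since util is not shown):

import numpy as np

def negLogLike(probs, g, tiny=np.finfo(float).tiny):
    """Mean negative log likelihood of one-hot targets g under probs."""
    # clamp zeros before the log, in the spirit of util.capZero
    return -np.mean(g * np.log(np.clip(probs, tiny, None)))

probs = np.array([[0.9, 0.1],
                  [0.0, 1.0]])   # an exact zero would make log() -inf
g = np.array([[1.0, 0.0],
              [0.0, 1.0]])
print(negLogLike(probs, g))      # finite, thanks to the clamp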
Example 5
File: logreg.py Project: idfah/cebl
    def gradient(self, x, g, returnError=True):
        x = np.asarray(x)
        g = np.asarray(g)

        probs = self.probs(x)

        delta = (probs - g) / probs.size

        grad = util.bias(x).T.dot(delta)

        gf = grad.ravel()

        if returnError:
            err = -np.mean(g * np.log(util.capZero(probs)))

            return err, gf
        else:
            return gf
Example 6
File: da.py Project: idfah/cebl
    def probs(self, x):
        """Compute class probabilities.

        Args:
            x:  Input data.  A numpy array with shape (nObs[,nIn]).

        Returns:
            Numpy array with shape (nObs,nCls) containing the probability values.

        Notes:
            This is less precise than discrim.  Only use probs if you
            need the class probabilities for each observation.
        """
        # log probability densities
        logDens = self.logDens(x)

        # density_i*P(C=i) / sum_j(density_j*P(C=j))
        mx = np.max((np.max(logDens), 0.0))
        dens = util.capZero(np.exp(logDens-mx))
        return dens / dens.sum(axis=1)[:,None]
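Subtracting the maximum before exponentiating is the standard trick for turning log densities into probabilities without overflow; the shift cancels in the normalized ratio. A brief standalone illustration with made-up log densities (a per-row maximum would be safer still; the global maximum mirrors the example above):

import numpy as np

# log densities large enough that naive np.exp overflows to inf
logDens = np.array([[800.0, 799.0],
                    [798.0, 800.0]])

mx = max(np.max(logDens), 0.0)           # global shift, as in the example
dens = np.exp(logDens - mx)              # the shift cancels after normalization
probs = dens / dens.sum(axis=1)[:, None]
print(probs)   # [[0.731 0.269], [0.119 0.881]]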
Example 7
    def plotFreqResponse(self,
                         freqs=None,
                         scale='linear',
                         showCorners=True,
                         label='Frequency Response',
                         ax=None,
                         **kwargs):
        """Plot the frequency response of the filter.
        """
        if ax is None:
            fig = plt.figure()
            ax = fig.add_subplot(1, 1, 1)

        freqs, responses = self.frequencyResponse(freqs=freqs)
        freqs = freqs * self.sampRate * 0.5 / np.pi
        responseMags = np.abs(responses)

        scale = scale.lower()
        if scale == 'linear':
            ax.set_ylabel('Gain')
        elif scale == 'log':
            ax.set_ylabel('Gain')
            ax.set_yscale('symlog')
        elif scale == 'db':
            responseMags = 10.0 * np.log10(util.capZero(responseMags**2))
            ax.set_ylabel('Gain (dB)')
        else:
            raise RuntimeError('Invalid scale: ' + str(scale) + '.')

        lines = ax.plot(freqs, responseMags, label=label, **kwargs)

        result = {'ax': ax, 'lines': lines}

        if showCorners:
            if scale == 'db':
                halfPow = 10.0 * np.log10(0.5)
                halfAmp = 10.0 * np.log10(0.5**2)
                mn = np.min(responseMags)
                mx = np.max(responseMags)
            else:
                halfPow = np.sqrt(0.5)
                halfAmp = 0.5
                mn = np.min(responseMags)
                mn = np.min((mn, 0.0))
                mx = np.max(responseMags)
                mx = np.max((mx, 1.0))

            halfPowerLines = ax.hlines(halfPow,
                                       0.0,
                                       0.5 * self.sampRate,
                                       color='red',
                                       linestyle='-.',
                                       label='Half Power')
            result['halfPowerLines'] = halfPowerLines

            halfAmpLines = ax.hlines(halfAmp,
                                     0.0,
                                     0.5 * self.sampRate,
                                     color='orange',
                                     linestyle=':',
                                     label='Half Amplitude')
            result['halfAmpLines'] = halfAmpLines

            cornerLines = ax.vlines((self.lowFreq, self.highFreq),
                                    mn,
                                    mx,
                                    color='violet',
                                    linestyle='--',
                                    label='Corners')
            result['cornerLines'] = cornerLines

        ax.set_xlabel('Frequency (Hz)')
        if scale != 'db':
            # dB magnitudes are negative, so only clamp the linear scales
            ax.set_ylim((0.0, 1.0))

        return result
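For context, the same style of plot can be produced for any filter whose frequency response is available. A short self-contained sketch using scipy.signal for a band-pass Butterworth filter (the corner frequencies and order are arbitrary; this is not the cebl filter class):

import numpy as np
import scipy.signal as sig
import matplotlib.pyplot as plt

sampRate = 256.0
lowFreq, highFreq = 8.0, 12.0   # made-up corner frequencies

# band-pass Butterworth, corners normalized to the Nyquist frequency
b, a = sig.butter(4, (lowFreq / (0.5 * sampRate),
                      highFreq / (0.5 * sampRate)), btype='bandpass')
w, h = sig.freqz(b, a)
freqs = w * sampRate * 0.5 / np.pi      # rad/sample -> Hz, as in the example
mags = np.abs(h)

fig, ax = plt.subplots()
ax.plot(freqs, mags, label='Frequency Response')
ax.hlines(np.sqrt(0.5), 0.0, 0.5 * sampRate, color='red',
          linestyle='-.', label='Half Power')
ax.vlines((lowFreq, highFreq), 0.0, 1.0, color='violet',
          linestyle='--', label='Corners')
ax.set_xlabel('Frequency (Hz)')
ax.set_ylabel('Gain')
ax.legend()
plt.show()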
Example 8
File: alopex.py Project: idfah/cebl
def alopex(optable,
           stepSize=0.0075,
           tempInit=10000,
           tempIter=20,
           accuracy=0.0,
           precision=0.0,
           divergeThresh=1.0e10,
           maxIter=10000,
           pTrace=False,
           tTrace=False,
           eTrace=False,
           callback=None,
           verbose=False,
           *args,
           **kwargs):
    """ ALgorithm Of Pattern EXtraction (ALOPEX)

    Args:
        optable:            An object implementing the Optable interface,
                            providing the parameters() and error() methods
                            used below.

        stepSize:           Step size.

        tempInit:           Initial temperature of the Boltzmann distribution
                            used to choose step directions.

        tempIter:           Number of iterations between temperature updates.
                            Each new temperature is the average correlation
                            observed since the previous update.

        accuracy:           Terminate if the current value of the error
                            function falls below this value.

        precision:          Terminate if change in the error function falls
                            below this value.

        divergeThresh:      Terminate if the value of the error function
                            exceeds this value.

        maxIter:            Terminate once current iteration reaches this value.

        pTrace:             If True, a list of matrices (one for each parameter
                            matrix) is included in the final results that
                            contains a history of the parameters during
                            optimization.  If False (default), then a history
                            is not kept.

        tTrace:             If True, an array containing a history of the
                            temperature during optimization is included in the
                            final results.  If False (default), then a history
                            is not kept.

        eTrace:             If True, an array containing a history of the error
                            function during optimization is included in the
                            final results.  If False (default), then a history
                            is not kept.

        callback:           Function called after each iteration as
                            callback(optable, iteration, paramTrace,
                            errorTrace).

        verbose:            Print extra information to standard out during the
                            training procedure.
    
        args, kwargs:       Arguments passed to optable.error.

    Returns:
        A dictionary containing the following keys:

        params:     A numpy array containing the optimized parameters.

        error:      Final value of the error function.

        iteration:  The number of iterations performed.

        reason:     A string describing the reason for termination.

        eTrace:     A list containing the value of the error function at each
                    iteration.  Only returned if eTrace is True.

        pTrace:     A list containing a copy of the parameters at each
                    iteration.  Only returned if pTrace is True.

        tTrace:     A list containing the temperature at each iteration.
                    Only returned if tTrace is True.

    Refs:
    """
    params = optable.parameters()

    # initial error
    error = optable.error(*args, **kwargs)
    errorPrev = error

    # initial temperature
    temp = tempInit

    # running correlation
    corrRun = 0.0

    # probability of taking a negative step
    probs = np.ones_like(params) * 0.5

    # weight perturbations; dw starts zeroed because the first
    # iteration copies it into dwPrev before assigning it
    dw = np.zeros_like(params)
    dwPrev = np.empty_like(params)

    paramTrace = [params.copy()]
    tempTrace = [temp]
    errorTrace = [error]

    # termination reason
    reason = ''

    iteration = 0

    if verbose:
        print('%d %6f' % (iteration, error))

    if callback is not None:
        callback(optable, iteration, paramTrace, errorTrace)

    while True:
        # corr err dw  action
        #  +    +   +   -
        #  -    -   +   +
        #  -    +   -   +
        #  +    -   -   -

        draw = np.random.random(params.shape)

        stepsNeg = np.where(draw < probs)[0]
        stepsPos = np.where(draw >= probs)[0]

        dwPrev[...] = dw
        dw[stepsNeg] = -stepSize
        dw[stepsPos] = stepSize

        params += dw

        errorPrev = error
        error = optable.error(*args, **kwargs)

        # increment iteration counter
        iteration += 1

        if verbose:
            print('%d %6f' % (iteration, error))

        if callback is not None:
            callback(optable, iteration, paramTrace, errorTrace)

        # keep parameter history if requested
        if pTrace:
            paramTrace.append(params.copy())

        # keep temperature trace if requested
        if tTrace:
            tempTrace.append(temp)

        # keep error function history if requested
        if eTrace:
            errorTrace.append(error)

        # terminate if maximum iterations reached
        if iteration >= maxIter:
            reason = 'maxiter'
            break

        # terminate if desired accuracy reached
        if error < accuracy:
            reason = 'accuracy'
            break

        # terminate if desired precision reached
        if np.abs(error - errorPrev) < precision:
            reason = 'precision'
            break

        # terminate if the error function diverges
        if error > divergeThresh:
            reason = 'diverge'
            break

        # current change in error
        de = error - errorPrev

        # correlation metric
        corr = de * dw

        corrRun += (np.abs(de) * np.sum(np.abs(dw))) / params.size

        if (iteration % tempIter) == 0:
            # new temperature is average correlation
            # since the previous temperature update
            temp = corrRun / tempIter

            # reset running correlation
            corrRun = 0.0

            if verbose:
                print('Cooling: %f' % temp)

        # probability of taking negative step
        # is given by the Boltzmann distribution
        probs[...] = 1.0 / util.capZero(1.0 + np.exp(-corr / temp))

    if verbose:
        print(reason)

    # save result into a dictionary
    result = {}
    result['params'] = params
    result['error'] = error
    result['iteration'] = iteration
    result['reason'] = reason

    if pTrace: result['pTrace'] = paramTrace
    if tTrace: result['tTrace'] = tTrace
    if eTrace: result['eTrace'] = errorTrace

    return result
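Because ALOPEX only evaluates the error function, it can be demonstrated without any gradient machinery. The toy below applies the same correlation/Boltzmann step rule to a plain quadratic; the constants are arbitrary and the cebl bookkeeping (traces, callbacks, termination tests) is omitted, so this is a sketch rather than the library routine:

import numpy as np

rng = np.random.default_rng(0)
params = rng.normal(size=10)
stepSize, temp = 0.01, 1.0

def error(p):
    return np.sum(p**2)            # toy objective with minimum at zero

err = error(params)
probs = np.full(params.shape, 0.5)

for iteration in range(2000):
    # take a +/- stepSize perturbation in every parameter
    draw = rng.random(params.shape)
    dw = np.where(draw < probs, -stepSize, stepSize)
    params += dw

    errPrev, err = err, error(params)
    de = err - errPrev

    # steps correlated with an error increase become likely to be reversed
    corr = de * dw
    probs = 1.0 / (1.0 + np.exp(np.clip(-corr / temp, -50.0, 50.0)))

    temp *= 0.99                   # fixed cooling schedule; the original
                                   # instead averages the running correlation

print(error(params))               # typically far below the initial error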
Example 9
    def __init__(self, s, sampRate=1.0, order=20, freqs=None, **kwargs):
        """Construct a new PSD using univariate autoregressive models.

        Args:
            s:          Numpy array with shape (observations[,dimensions])
                        containing the signal data to use for generating the
                        PSD estimate.  Should be matrix-like.  Rows are
                        observations.  Columns are optional and, if present,
                        represent dimensions.

            sampRate:   Sampling frequency of data.

            order:      Order of the autoregressive models.

            freqs:      Frequencies at which to estimate the signal power.
                        If freqs is None (default) then estimate the power
                        at all integer frequencies between 1Hz and Nyquist.
                        If freqs is an integer then estimate the power at
                        freqs equally spaced frequencies above DC to
                        Nyquist.  If freqs is a list or numpy array, then
                        estimate power at the specified frequencies.
        """
        s = util.colmat(s)
        nObs, nChan = s.shape


        if freqs is None:
            freqs = np.arange(1.0, np.floor(sampRate / 2.0))
        elif isinstance(freqs, (int, )):
            freqs = np.linspace(0.0, sampRate / 2.0, freqs + 1)[1:]
        else:
            freqs = np.asarray(freqs)

        nFreq = len(freqs)

        # period of sampling frequency
        dt = 1.0 / float(sampRate)

        # vector of model orders
        #orders = np.arange(1, order+1)[None,:]
        orders = np.arange(order, 0, -1)[None, :]

        weights = np.empty((order, nChan))
        iVar = np.empty(nChan)

        # for each channel
        for chanI, chanS in enumerate(s.T):
            # train an AR model
            arFit = AutoRegression(chanS[None, ...], order=order, **kwargs)

            # residual values of AR model
            resid = arFit.resid(chanS[None, ...])[0]

            # model weights, ditch bias
            weights[:, chanI] = arFit.model.weights[:-1].ravel()

            # innovation variance
            iVar[chanI] = np.var(resid)

        # estimate spectrum, vectorized
        powersDenom = util.capZero(
            np.abs(1.0 - np.exp(-2.0j * np.pi * freqs[:, None] * orders *
                                dt).dot(weights))**2)
        powers = (iVar[None, :] * dt) / powersDenom

        # scale to power density
        powers /= sampRate

        ## # for each channel
        ## for chanI, chanS in enumerate(s.T):
        ##     # train an AR model
        ##     arFit = AutoRegression((chanS,), order=order, *args, **kwargs)

        ##     # predicted and residual values of AR model
        ##     pred, resid = arFit.eval(chanS, returnResid=True)

        ##     # model weights, ditch bias
        ##     weights = arFit.model.weights[:-1,None]

        ##     # innovation variance
        ##     iVar = np.var(resid)

        ##     # estimate spectrum, in a loop
        ##     #for i, f in enumerate(freqs):
        ##     #    powers[i,chanI] = (iVar * dt) / (np.abs(1.0 - \
        ##     #        np.sum(weights * np.exp(-2.0j*np.pi*f*orders) * dt)))**2)

        ##     # estimate spectrum, vectorized
        ##     powers[:,chanI] = ((iVar * dt) /
        ##         (np.abs(1.0 - np.sum(weights *
        ##             np.exp(-2.0j*np.pi*freqs[:,None]*orders*dt), axis=0))**2))

        ##     # estimate spectrum vectorized using sines and cosines instead of complex numbers
        ##     #cs = np.sum(weights[:,None] * np.cos(2.0 * np.pi * freqsNorm*orders)), axis=0)
        ##     #sn = np.sum(weights[:,None] * np.sin(2.0 * np.pi * freqsNorm*orders)), axis=0)
        ##     #powers[:,chanI] = iVar / (sampRate * ((1.0 - cs)**2 + sn**2))

        PSDBase.__init__(self, freqs, powers, sampRate)
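The vectorized line above implements the standard AR power spectrum, P(f) = iVar * dt / |1 - sum_k w_k exp(-2*pi*i*f*k*dt)|^2, up to the final 1/sampRate density scaling. A compact standalone check with fixed AR(2) coefficients chosen to place a spectral peak near 10 Hz (numpy only; the AutoRegression fit is replaced by known weights):

import numpy as np

sampRate = 100.0
dt = 1.0 / sampRate
iVar = 1.0                                  # innovation variance

# AR(2) with poles at r*exp(+/- 2*pi*i*f0*dt): x_t = a1*x_{t-1} + a2*x_{t-2} + e_t
r, f0 = 0.95, 10.0
a = np.array([2.0 * r * np.cos(2.0 * np.pi * f0 * dt), -r**2])

freqs = np.arange(1.0, sampRate / 2.0)      # 1 Hz ... Nyquist
lags = np.arange(1, len(a) + 1)[None, :]    # lag of each AR coefficient

# P(f) = iVar*dt / |1 - sum_k a_k exp(-2*pi*i*f*k*dt)|^2
phase = np.exp(-2.0j * np.pi * freqs[:, None] * lags * dt)
powers = (iVar * dt) / np.abs(1.0 - phase.dot(a))**2

print(freqs[np.argmax(powers)])             # spectral peak near 10 Hz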
Example 10
    def gradient(self, x, g, returnError=True):
        """Compute the gradient of the mean-squared error with respect to the
        network weights for each layer and given inputs and targets.  Useful
        for optimization routines that make use of first-order gradients.

        Args:
            x:

            g:

            returnError:    If True (default) then also return the
                            mean-squared error.  This can improve
                            performance in some optimization routines
                            by avoiding an additional forward pass.

        Returns:
            If returnError is True, then return a tuple containing
            the error followed by a 1d numpy array containing the
            gradient of the packed weights.  If returnError is False,
            then only return the gradient.
        """
        x = np.asarray(x)
        g = np.asarray(g)

        # packed views of the hidden and visible gradient matrices
        views = util.packedViews(self.layerDims, dtype=self.dtype)
        pg  = views[0]
        hgs = views[1:-1]
        vg  = views[-1]

        # forward pass
        z1 = util.bias(x)
        z1s = [z1]
        zPrimes = []
        for hw, phi in zip(self.hws, self.transFunc):
            h = z1.dot(hw)

            z1 = util.bias(phi(h))
            z1s.append(z1)

            zPrime = phi(h, 1)
            zPrimes.append(zPrime)

        v = z1.dot(self.vw)
        probs = util.softmax(v)

        # error components
        delta = util.colmat(probs - g) / probs.size

        # visible layer gradient
        vg[...] = z1.T.dot(delta)
        vg += self.penaltyGradient(-1)

        # backward pass for hidden layers
        w = self.vw
        for l in range(self.nHLayers-1, -1, -1):
            delta = delta.dot(w[:-1,:].T) * zPrimes[l]
            hgs[l][...] = z1s[l].T.dot(delta)
            hgs[l] += self.penaltyGradient(l)
            w = self.hws[l]

        if returnError:
            error = -np.mean(g*np.log(util.capZero(probs))) + self.penaltyError()
            return error, pg
        else:
            return pg
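The delta term encodes the identity that softmax followed by cross-entropy has gradient probs - g at the pre-activations. A minimal single-layer check against finite differences (numpy only; bias handling, hidden layers, and penalties omitted):

import numpy as np

rng = np.random.default_rng(0)
x = rng.normal(size=(5, 3))                 # nObs x nIn
g = np.eye(4)[rng.integers(0, 4, size=5)]   # one-hot targets, nObs x nCls
w = rng.normal(size=(3, 4))

def softmax(v):
    e = np.exp(v - v.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

def errorFn(w):
    probs = softmax(x.dot(w))
    return -np.mean(g * np.log(probs))

# analytic gradient: delta = (probs - g) / probs.size, as in the example
probs = softmax(x.dot(w))
delta = (probs - g) / probs.size
grad = x.T.dot(delta)

# finite-difference comparison
eps = 1e-6
numGrad = np.empty_like(w)
for i in np.ndindex(w.shape):
    wp, wm = w.copy(), w.copy()
    wp[i] += eps
    wm[i] -= eps
    numGrad[i] = (errorFn(wp) - errorFn(wm)) / (2.0 * eps)

print(np.max(np.abs(grad - numGrad)))       # ~1e-10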