Example #1
def maxlikelihood(trmodel, ds, minp=1e-4, maxp=1 - 1e-6, bounds=None, returnoptout=False,
                  optoptions={}, verbosity=1):
    """
    Finds the maximum likelihood TimeResolvedModel given the data.

    Parameters
    ----------
    trmodel: TimeResolvedModel
        The TimeResolvedModel that is used as the seed, and which defines the class of parameterized
        models to optimize over.

    ds: DataSet
        A DataSet, containing time-series data.

    minp, maxp: float, optional
        Values used to smooth the 0 and 1 probability boundaries of the likelihood function.

    bounds: list or None, optional
        Bounds on the parameters, as specified in scipy.optimize.minimize

    returnoptout: bool, optional
        Whether to return the output of scipy.optimize.minimize.

    optoptions: dict, optional
        Optional arguments for scipy.optimize.minimize.

    Returns
    -------
    TimeResolvedModel
        The maximum likelihood model. If `returnoptout` is True, the output of
        scipy.optimize.minimize is also returned.

    """
    maxlmodel = trmodel.copy()

    def objfunc(parameters):
        maxlmodel.set_parameters(parameters)
        return negloglikelihood(maxlmodel, ds, minp, maxp)

    if verbosity > 0:
        print("- Performing MLE over {} parameters...".format(len(maxlmodel.parameters_copy())), end='')
    if verbosity > 1:
        print("")

    seed = maxlmodel.parameters_copy()
    start = _tm.time()
    optout = _minimize(objfunc, seed, options=optoptions, bounds=bounds)
    maxlparameters = optout.x
    maxlmodel.set_parameters(maxlparameters)
    end = _tm.time()

    if verbosity == 1:
        print("complete.")
    if verbosity > 0:
        print("- Time taken: {} seconds".format(end - start)),

    if returnoptout:
        return maxlmodel, optout
    else:
        return maxlmodel
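
The pattern above (copy the seed model, wrap its negative log-likelihood in a closure that writes the trial parameters back into the copy, and hand the flat parameter vector to scipy.optimize.minimize) can be exercised on any object with the same small get/set-parameter interface. Below is a minimal, self-contained sketch; `ToyModel` and its Gaussian objective are illustrative assumptions, not part of the original code.

import numpy as np
from scipy.optimize import minimize


class ToyModel:
    """Illustrative stand-in exposing the small interface the MLE loop relies on."""

    def __init__(self, params):
        self._params = np.asarray(params, dtype=float)

    def copy(self):
        return ToyModel(self._params.copy())

    def parameters_copy(self):
        return self._params.copy()

    def set_parameters(self, params):
        self._params = np.asarray(params, dtype=float)

    def negloglikelihood(self, data):
        # toy Gaussian objective: a single mean parameter
        mu = self._params[0]
        return 0.5 * np.sum((data - mu) ** 2)


data = np.array([0.9, 1.1, 1.05, 0.95])
model = ToyModel([0.0])
maxlmodel = model.copy()


def objfunc(parameters):
    maxlmodel.set_parameters(parameters)
    return maxlmodel.negloglikelihood(data)


optout = minimize(objfunc, model.parameters_copy())
maxlmodel.set_parameters(optout.x)
print(optout.x)  # approximately [1.0]
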
Example #2
def minimize_with_watcher(fun, x0, args=(), *, slow_len=(), slow_thresh=(), ctol_fun=None, ctol=1e-6, logger=None, callback=None, method_str="default method", options={}, **k):
	watcher = Watcher(fun, x0, ctol_fun=ctol_fun, ctol=ctol, logger=logger, slow_len=slow_len, slow_thresh=slow_thresh)
	if callback:
		if 'callback' in options:
			option_callback = options['callback']
			new_callback = lambda z: (callback(z), watcher(z), option_callback(z))
			del options['callback']
		else:
			new_callback = lambda z: (callback(z), watcher(z), )
	else:
		if 'callback' in options:
			option_callback = options['callback']
			new_callback = lambda z: (watcher(z), option_callback(z))
			del options['callback']
		else:
			new_callback = lambda z: (watcher(z), )
	if method_str in ("bhhh","bhhh-wolfe",):
		options['logger'] = logger
	try:
		r = _minimize(fun, x0, args, callback=new_callback, options=options, **k)
	except ProgressTooSlow as err:
		r = err.result()
		if logger:
			logger.log(30,"Progressing too slowly [{}], {}".format(method_str,r.message))
		r.slow = err.slowness
	except ComputedToleranceMet as suc:
		r = suc.result()
		r.message = "Optimization terminated successfully per computed tolerance"
		if logger:
			logger.log(30,"{} [{}]".format(suc.describe(),method_str))
	except NotImplementedError as err:
		r = OptimizeResult(message="Not Implemented", success=False, x=x0)
		if logger:
			logger.log(30,"{} [{}]".format(r.message,method_str))
#	except Exception as err:
#		r = OptimizeResult(message=str(err), success=False)
#		if logger:
#			logger.log(30,"{} [{}]".format(r.message,method_str))
	else:
		if logger:
			logger.log(30,"{} [{}]".format(r.message,method_str))
	return r
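
The core idea above is folding several per-iteration callables (the user callback, the progress Watcher, and any callback passed through `options`) into the single callback that scipy.optimize.minimize accepts. A minimal sketch of that chaining without the Watcher machinery; the callbacks here are illustrative.

import numpy as np
from scipy.optimize import minimize


def chain(*callbacks):
    # fold several per-iteration callables into the single callback scipy accepts
    return lambda xk: [cb(xk) for cb in callbacks]


history = []


def record(xk):
    history.append(np.copy(xk))  # keep every iterate


def report(xk):
    print("iterate:", xk)


def rosen(x):
    return (1 - x[0]) ** 2 + 100 * (x[1] - x[0] ** 2) ** 2


res = minimize(rosen, x0=[-1.0, 2.0], method="BFGS", callback=chain(record, report))
print(len(history), "iterations recorded;", res.message)
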
Example #3
def maxlikelihood(probtrajectory,
                  clickstreams,
                  times,
                  minp=0.0001,
                  maxp=0.999999,
                  method='Nelder-Mead',
                  returnOptout=False,
                  options=None,
                  verbosity=1):
    """
    Implements maximum likelihood estimation over a model for a time-resolved probability trajectory,
    and returns the maximum likelihood model.

    Parameters
    ----------
    probtrajectory : ProbTrajectory
        The model for which to maximize the likelihood of the parameters. The value of the parameters
        in the input model is used as the seed.

    clickstreams : dict
        The data, consisting of a counts time-series for each measurement outcome. This is a dictionary
        whereby the keys are the outcome labels and the values are lists (or arrays) giving the number
        of times that measurement outcome was observed at the corresponding time in the `times` list.

    times : list or array
        The times associated with the data. The probabilities are extracted from the model at these
        times (see the model.get_probabilities method) when optimizing the model parameters.

    minp : float, optional
        A positive value close to zero. The value of `p` below which x*log(p) is approximated using
        a Taylor expansion (used to smooth out the parameter boundaries and obtain better fitting
        performance). The default value should be fine.

    maxp : float, optional
        A positive value close to, and <= 1. The value of `p` above which the boundary on `p`
        being <= 1 is enforced by replacing x*log(p) with a smooth, quickly growing function.
        The default value should be fine.

    method : str, optional
        Any value allowed for the method parameter in scipy.optimize.minimize().

    verbosity : int, optional
        The amount of printing to screen.

    returnOptout : bool, optional
        Whether or not to return the output of the optimizer.

    Returns
    -------
    ProbTrajectory
        The maximum likelihood model returned by the optimizer.

    if returnOptout:
        optout
            The output of the optimizer.
    """
    if options is None:
        options = {}

    maxlprobtrajectory = probtrajectory.copy()

    def objfunc(parameterslist):

        maxlprobtrajectory.set_parameters_from_list(parameterslist)

        return negloglikelihood(maxlprobtrajectory, clickstreams, times, minp,
                                maxp)

    numparams = len(
        probtrajectory.hyperparameters) * (len(probtrajectory.outcomes) - 1)

    if verbosity > 0:
        print("      - Performing MLE over {} parameters...".format(numparams),
              end='')
    if verbosity > 1:
        print("")
        options['disp'] = True

    start = _tm.time()
    seed = probtrajectory.get_parameters_as_list()
    optout = _minimize(objfunc, seed, method=method, options=options)
    mleparameters = optout.x
    end = _tm.time()

    maxlprobtrajectory.set_parameters_from_list(mleparameters)

    if verbosity == 1:
        print("complete.")
    if verbosity > 1:
        print("      - Complete!")
        print("      - Time taken: {} seconds".format(end - start)),
        nll_seed_adj = negloglikelihood(probtrajectory, clickstreams, times,
                                        minp, maxp)
        nll_seed = negloglikelihood(probtrajectory, clickstreams, times, 0, 1)
        nll_result_adj = negloglikelihood(maxlprobtrajectory, clickstreams,
                                          times, minp, maxp)
        nll_result = negloglikelihood(maxlprobtrajectory, clickstreams, times,
                                      0, 1)
        print(
            "      - The negloglikelihood of the seed = {} (with boundary adjustment = {})"
            .format(nll_seed, nll_seed_adj))
        print(
            "      - The negloglikelihood of the output = {} (with boundary adjustment = {})"
            .format(nll_result, nll_result_adj))

    if returnOptout:
        return maxlprobtrajectory, optout
    else:
        return maxlprobtrajectory
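
The `negloglikelihood` being minimized is not shown here. Below is a simplified, hedged sketch of a counts-weighted log-likelihood with a hard clip at `minp`/`maxp` (the original uses a smoother Taylor-expansion treatment near the boundaries, per the docstring); `probs_of_outcome` is an assumed stand-in for the model's probability-trajectory evaluation.

import numpy as np


def negloglikelihood_clipped(probs_of_outcome, clickstreams, times, minp=1e-4, maxp=1 - 1e-6):
    """Sum over outcomes and times of -counts * log(p), with p clipped to [minp, maxp].

    probs_of_outcome(outcome, times) -> array of model probabilities (assumed interface).
    clickstreams maps each outcome label to an array of counts at the corresponding times.
    """
    nll = 0.0
    for outcome, counts in clickstreams.items():
        p = np.clip(probs_of_outcome(outcome, times), minp, maxp)
        nll -= np.sum(np.asarray(counts) * np.log(p))
    return nll


# toy two-outcome example with a constant-probability "model"
times = np.arange(5)
clicks = {'0': np.array([8, 7, 9, 8, 8]), '1': np.array([2, 3, 1, 2, 2])}
probs = lambda outcome, t: np.full(len(t), 0.8 if outcome == '0' else 0.2)
print(negloglikelihood_clipped(probs, clicks, times))
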
Example #4
File: _PCO.py  Project: neurophysics/meet
def PCOa(a, Y, num=1, bestof=15):
    '''
    Phase Amplitude Coupling Optimization, variant with provided amplitude

    It maximizes the length of the "mean vector" and
    returns the filter coefficients w

    Input:
    ------
    a - (1d numpy array, floats > 0) amplitudes
    Y - (2d numpy array, complex) analytic representation of signal,
        channels x datapoints
    num - (int > 0) - determines the number of filters that will be
                      derived. This depends also on the rank of Y, the final
                      number of filters will be min([num, rank(Y)]),
                      defaults to 1
    bestof (int > 0) - the number of restarts for the optimization of the
                       individual filters. The best filter over all
                       these restarts with random initializations is
                       chosen, defaults to 15.

    Output:
    -------
    vlen - numpy array - the length of the mean vector for each filter
    Wy - numpy array - the filters for Y, each filter is in a column of Wy
                       (if num==1: Wy is 1d)
    '''
    ############################
    # test all input arguments #
    ############################
    try:
        a = _np.asarray(a).astype(float)
    except Exception:
        raise TypeError('a must be iterable of floats')
    if not a.ndim == 1:
        raise ValueError('a must be 1-dimensional')
    ###
    try:
        Y = _np.asarray(Y)
    except Exception:
        raise TypeError('Y must be iterable')
    if not _np.iscomplexobj(Y):
        raise TypeError('Y must be complex valued')
    if not Y.ndim == 2:
        raise ValueError('Y must be 2d')
    if not Y.shape[1] == len(a):
        raise ValueError('Number of points in Y must match the length of a')
    ###
    if not isinstance(num, int):
        raise TypeError('num must be integer')
    if not num > 0:
        raise ValueError('num must be > 0')
    ###
    if not isinstance(bestof, int):
        raise TypeError('bestof must be integer')
    if not bestof > 0:
        raise ValueError('bestof must be > 0')
    #####################################################
    # normalize a to have zero mean and a variance of 1 #
    #####################################################
    a = (a - a.mean())/a.std()
    ######################################
    # Whiten the (real part of the) data #
    ######################################
    Why, sy = _linalg.svd(Y.real, full_matrices=False)[:2]
    # get rank
    py = (sy > (_np.max(sy) * _np.max([Y.shape[0],_np.prod(Y.shape[1:])]) *
          _np.finfo(Y.dtype).eps)).sum()
    Why = Why[:,:py] / sy[:py][_np.newaxis]
    # whiten for the real part of the data
    Y = _np.dot(Why.T, Y)
    # get the final number of filters
    num = _np.min([num, py])
    ######################
    # start optimization #
    ######################
    for i in xrange(num):
        if i == 0:
            # get first filter
            # get best parameters and function values of each run
            optres = _np.array([
                     _minimize(func = _PCOa_obj_der, fprime = None,
                               x0 = _np.random.random(py) * 2 -1,
                               args = (a,Y,-1,True,False),
                               m=100, approx_grad=False, iprint=0)[:2]
                     for k in xrange(bestof)])
            # somehow, _minimize sometimes returns the wrong function value,
            # so this is re-determined here
            for k in xrange(bestof):
                optres[k,1] = _PCOa_obj_der(
                        optres[k,0],
                        a,Y,-1,False,False)
            # determine the best
            best = _np.argmin(optres[:,1])
            # save results
            vlen = [optres[best,1]]
            filt = optres[best,0]
        else:
            # get consecutive pairs of filters
            # project data into null space of previous filters
            # this is done by getting the right eigenvectors of the filter
            # matrix corresponding to vanishing eigenvalues
            By = _linalg.svd(_np.atleast_2d(filt.T),
                             full_matrices=True)[2][i:].T
            Yb = _np.dot(By.T,Y)
            # get best parameters and function values of each run
            optres = _np.array([
                     _minimize(func = _PCOa_obj_der, fprime = None,
                               x0 = _np.random.random(py - i) * 2 -1,
                               args = (a,Yb,-1,True,False),
                               m=100, approx_grad=False, iprint=0)[:2]
                     for k in xrange(bestof)])
            # somehow, _minimize sometimes returns the wrong function value,
            # so this is re-determined here
            for k in xrange(bestof):
                optres[k,1] = _PCOa_obj_der(
                        optres[k,0],
                        a,Yb,-1,False,False)
            # determine the best
            best = _np.argmin(optres[:,1])
            # save results
            vlen = vlen + [optres[best,1]]
            filt = _np.column_stack([
                filt,
                By.dot(optres[best,0])])
    # project filters back into original (un-whitened) channel space
    Wy = Why.dot(filt)
    #normalize filters to have unit length
    Wy = Wy / _np.sqrt(_np.sum(Wy**2, 0))
    return -1*_np.array(vlen), Wy
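
The deflation step used in the loop above can be checked in isolation: the right singular vectors of the filter matrix that correspond to vanishing singular values form an orthonormal basis of its null space, so projecting the data onto them forces the next filter to be orthogonal to the ones already found. A minimal sketch with arbitrary values:

import numpy as np

rng = np.random.default_rng(0)
p, i = 6, 2                        # channels, number of filters already found
filt = rng.standard_normal((p, i))

# rows i: of Vt belong to vanishing singular values, i.e. they span the null
# space of filt.T; their transpose B holds the null-space basis in its columns
B = np.linalg.svd(np.atleast_2d(filt.T), full_matrices=True)[2][i:].T

print(B.shape)                     # (6, 4)
print(np.allclose(filt.T @ B, 0))  # True: the next filter is searched orthogonally to filt
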
Example #5
def cSPoAvgC(X, opt='max', num=1, log=True, bestof=15):
    """
    canonical Source Power Average Correlation analysis (cSPoAvgC)
    
    For the dataset X, find a linear filter wx, such
    that the average correlation of the amplitude envelopes of wx.T.dot(X)
    and (wx.T.dot(X)).mean(-1) is maximized, i.e. it seeks a spatial filter
    to maximize the correlation of amplitude envelopes and their average.

    The solution is inspired by and derived from the original cSPoC analysis.

    Reference:
    ----------
    Dahne, S., et al., Finding brain oscillations with power dependencies
    in neuroimaging data, NeuroImage (2014),
    http://dx.doi.org/10.1016/j.neuroimage.2014.03.075

    Notes:
    ------
    Dataset X must be a 3d a array of shape
    (channels x datapoints x trials).
    
    If log == True, then the log transform is taken before the average
    inside the trial
    
    If X is of complex type, it is assumed that this is the analytic
    representation of X, i.e., the hilbert transform was already applied.
    
    The filters are in the columns of the filter matrices Wx
    
    The input data can be filtered as:    
    
    np.tensordot(Wx, X, axes=(0,0))

    Input:
    ------
    -- X numpy array - the dataset of shape px x N x tr, where px is the
                       number of sensors, N the number of data-points, tr
                       the number of trials. If X is of complex
                       type it is assumed that this already is the
                       analytic representation of X, i.e. the hilbert
                       transform already was applied.
    -- opt {'max', 'min', 'zero'} - determines whether the correlation coefficient
                            should be maximized - seeks for positive
                            correlations ('max', default);
                            or minimized - seeks for anti-correlations
                            ('min'), ('zero') seeks for zero correlation
    -- num int > 0 - determine the number of filters that will be derived.
                     This depends also on the rank of X, if X is 2d, the
                     number of filter pairs will be: min([num, rank(X)]).
                     If X is 3d, the array is flattened into a 2d array
                     before calculating the rank
    -- log {True, False} - compute the correlation between the log-
                           transformed envelopes, if datasets come in
                           epochs, then the log is taken before averaging
                           inside the epochs, defaults to True
    -- bestof int > 0 - the number of restarts for the optimization of the
                        individual filter pairs. The best filter over all
                        these restarts with random initializations is
                        chosen, defaults to 15.
    Output:
    -------
    corr - numpy array - the canonical correlations of the amplitude
                         envelopes for each filter
    Wx - numpy array - the filters for X, each filter is in a column of Wx
                       (if num==1: Wx is 1d)
    """
    #check input
    assert isinstance(X, _np.ndarray), "X must be numpy array"
    assert (X.ndim==3), "X must be 3D numpy array"
    assert opt in ['max', 'min', 'zero'], "\"opt\" must be \"max\", " +\
            "\"min\" or \"zero\""
    assert isinstance(num, int), "\"num\" must be integer > 0"
    assert num > 0, "\"num\" must be integer > 0"
    assert log in [True, False, 0, 1], "\"log\" must be a boolean (True " \
                                     + "or False)"
    assert isinstance(bestof, int), "\"bestof\" must be integer > 0"
    assert bestof > 0, "\"bestof\" must be integer > 0"
    # get whitening transformation for X
    Whx, sx = _linalg.svd(X.reshape(X.shape[0],-1).real, full_matrices=False)[:2]
    #get rank
    px = (sx > (_np.max(sx) * _np.max([X.shape[0],_np.prod(X.shape[1:])]) *
          _np.finfo(X.dtype).eps)).sum()
    Whx = Whx[:,:px] / sx[:px][_np.newaxis]
    # whiten the data
    X = _np.tensordot(Whx, X, axes = (0,0))
    # get hilbert transform
    if not _np.iscomplexobj(X):
        X = _signal.hilbert(X, axis=1)
    # get the final number of filters
    num = _np.min([num, px])
    # determine if correlation coefficient is maximized or minimized
    if opt == 'max': sign = -1
    elif opt == 'min': sign = 1
    elif opt == 'zero': sign = 0
    else: raise ValueError("\"opt\" must be \"max\", " +\
            "\"min\" or \"zero\"")
    # start optimization
    for i in xrange(num):
        if i == 0:
            # get first filter
            # get best parameters and function values of each run
            optres = _np.array([
                     _minimize(func = _env_corr_avg, fprime = None,
                               x0 = _np.random.random(px) * 2 -1,
                               args = (X, sign, log), m=100,
                                   approx_grad=False, iprint=1)[:2]
                     for k in xrange(bestof)])
            # somehow, _minimize sometimes returns the wrong function value,
            # so this is re-determined here
            for k in xrange(bestof):
                optres[k,1] = _env_corr_avg(
                        optres[k,0],
                        X, sign, log)[0]
            # determine the best result
            best = _np.argmin(optres[:,1])
            # save results
            if sign != 0:
                corr = [sign * optres[best,1]]
            else:
                corr = [optres[best,1]]
            filt = optres[best,0]
        else:
            # get consecutive pairs of filters
            # project data into null space of previous filters
            # this is done by getting the right eigenvectors of the filter
            # matrix corresponding to vanishing eigenvalues
            Bx = _linalg.svd(_np.atleast_2d(filt.T),
                             full_matrices=True)[2][i:].T
            Xb = _np.tensordot(Bx,X, axes=(0,0))
            # get best parameters and function values of each run
            optres = _np.array([
                     _minimize(func = _env_corr_avg, fprime = None,
                               x0 = _np.random.random(px-i) * 2 -1,
                               args = (Xb, sign, log),
                               m=100, approx_grad=False, iprint=1)[:2]
                               for k in xrange(bestof)])
            # somehow, _minimize sometimes returns the wrong function value,
            # so this is re-determined here
            for k in xrange(bestof):
                optres[k,1] = _env_corr_avg(
                        optres[k,0],
                        Xb, sign, log)[0]
            # determine the best result
            best = _np.argmin(optres[:,1])
            # save results
            if sign != 0:
                corr = corr + [sign * optres[best,1]]
            else:
                corr = corr + [optres[best,1]]
            filt = _np.column_stack([filt, Bx.dot(optres[best,0])])
    # project filters back into original (un-whitened) channel space
    Wx = Whx.dot(filt)
    #normalize filters to have unit length
    Wx = Wx / _np.sqrt(_np.sum(Wx**2, 0))
    return _np.array(corr), Wx
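
The rank-truncated whitening applied at the top of these functions can be illustrated on synthetic data: after the transform, the retained components are exactly uncorrelated with unit norm. A minimal sketch, not taken from the original module:

import numpy as np

rng = np.random.default_rng(1)
X = rng.standard_normal((4, 1000))            # channels x datapoints
X[3] = X[0] + X[1]                            # make the data rank-deficient (rank 3)

U, s = np.linalg.svd(X, full_matrices=False)[:2]
tol = s.max() * max(X.shape) * np.finfo(X.dtype).eps
r = int((s > tol).sum())                      # numerical rank
Wh = U[:, :r] / s[:r]                         # whitening matrix, channels x rank

Xw = Wh.T @ X                                 # whitened data, rank x datapoints
print(r)                                      # 3
print(np.allclose(Xw @ Xw.T, np.eye(r)))      # True: identity covariance on the retained subspace
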
Example #6
def cSPoC(X, Y, opt='max', num=1, log=True, bestof=15, x_ind=None, y_ind=None):
    """
    canonical Source Power Correlation analysis (cSPoC)
    
    For the datasets X and Y, find a pair of linear filters wx and wy, such
    that the correlation of the amplitude envelopes wx.T.dot(X) and
    wy.T.dot(Y) is maximized.

    Reference:
    ----------
    Dahne, S., et al., Finding brain oscillations with power dependencies
    in neuroimaging data, NeuroImage (2014),
    http://dx.doi.org/10.1016/j.neuroimage.2014.03.075

    Notes:
    ------
    Datasets X and Y can be either 2d numpy arrays of shape
    (channels x datapoints) or 3d array of shape
    (channels x datapoints x trials).
    For 3d arrays the average envelope in each trial is calculated if x_ind
    (or y_ind, respectively) is None. If they are set, the difference of
    the instantaneous amplitude envelope at x_ind/y_ind and the average
    envelope is calculated for each trial.
    If log == True, then the log transform is taken before the average
    inside the trial

    If X and/or Y are of complex type, it is assumed that these are the
    analytic representations of X and Y, i.e., the hilbert transform was
    applied before.
    
    The filters are in the columns of the filter matrices Wx and Wy,
    for 2d input the data can be filtered as:
    
    np.dot(Wx.T, X)
    
    for 3d input:
    
    np.tensordot(Wx, X, axes=(0,0))

    Input:
    ------
    -- X numpy array - the first dataset of shape px x N (x tr), where px
                       is the number of sensors, N the number of data-
                       points, tr the number of trials. If X is of complex
                       type it is assumed that this already is the
                       analytic representation of X, i.e. the hilbert
                       transform already was applied.
    -- Y is the second dataset of shape py x N (x tr)
    -- opt {'max', 'min'} - determines whether the correlation coefficient
                            should be maximized - seeks for positive
                            correlations ('max', default);
                            or minimized - seeks for anti-correlations
                            ('min')
    -- num int > 0 - determine the number of filter-pairs that will be
                     derived. This depends also on the ranks of X and Y,
                     if X and Y are 2d the number of filter pairs will be:
                     min([num, rank(X), rank(Y)]). If X and/or Y are 3d the
                     array is flattened into a 2d array before calculating
                     the rank
    -- log {True, False} - compute the correlation between the log-
                           transformed envelopes, if datasets come in
                           epochs, then the log is taken before averaging
                           inside the epochs, defaults to True
    -- bestof int > 0 - the number of restarts for the optimization of the
                        individual filter pairs. The best filter over all
                        these restarts with random initializations is
                        chosen, defaults to 15.
    -- x_ind int - the time index (-X.shape[1] <= x_ind < X.shape[1]) where
                   the difference of the instantaneous envelope and the
                   average envelope is determined for X
    -- y_ind int - the time index (-Y.shape[1] <= y_ind < Y.shape[1]) where
                   the difference of the instantaneous envelope and the
                   average envelope is determined for Y

    Output:
    -------
    corr - numpy array - the canonical correlations of the amplitude
                         envelopes for each filter
    Wx - numpy array - the filters for X, each filter is in a column of Wx
                       (if num==1: Wx is 1d)
    Wy - numpy array - the filters for Y, each filter is in a column of Wy
                       (if num==1: Wy is 1d)
    """
    #check input
    assert isinstance(X, _np.ndarray), "X must be numpy array"
    assert (X.ndim ==2 or X.ndim==3), "X must be 2D or 3D numpy array"
    assert isinstance(Y, _np.ndarray), "Y must be numpy array"
    assert (Y.ndim ==2 or Y.ndim==3), "Y must be 2D or 3D numpy array"
    assert X.shape[-1] == Y.shape[-1], "Size of last dimension in X and" \
                                     + " Y must be equal"
    assert opt in ['max', 'min'], "\"opt\" must be \"max\" or \"min\""
    assert isinstance(num, int), "\"num\" must be integer > 0"
    assert num > 0, "\"num\" must be integer > 0"
    assert log in [True, False, 0, 1], "\"log\" must be a boolean (True " \
                                     + "or False)"
    assert isinstance(bestof, int), "\"bestof\" must be integer > 0"
    assert bestof > 0, "\"bestof\" must be integer > 0"
    if x_ind is not None:
        assert X.ndim == 3, "If x_ind is set, X must be 3d array!"
        assert isinstance(x_ind, int), "x_ind must be integer!"
        assert ((x_ind >= -X.shape[1]) and
                (x_ind < X.shape[1])), "x_ind must match the range of " +\
                                       "X.shape[1]"
    if y_ind is not None:
        assert Y.ndim == 3, "If y_ind is set, Y must be 3d array!"
        assert isinstance(y_ind, int), "y_ind must be integer!"
        assert ((y_ind >= -Y.shape[1]) and
                (y_ind < Y.shape[1])), "y_ind must match the range of " +\
                                       "Y.shape[1]"
    # get whitening transformations
    # for X
    Whx, sx = _linalg.svd(X.reshape(X.shape[0],-1).real, full_matrices=False)[:2]
    #get rank
    px = (sx > (_np.max(sx) * _np.max([X.shape[0],_np.prod(X.shape[1:])]) *
          _np.finfo(X.dtype).eps)).sum()
    Whx = Whx[:,:px] / sx[:px][_np.newaxis]
    # for Y
    Why, sy = _linalg.svd(Y.reshape(Y.shape[0],-1).real, full_matrices=False)[:2]
    # get rank
    py = (sy > (_np.max(sy) * _np.max([Y.shape[0],_np.prod(Y.shape[1:])]) *
          _np.finfo(Y.dtype).eps)).sum()
    Why = Why[:,:py] / sy[:py][_np.newaxis]
    # whiten the data
    X = _np.tensordot(Whx, X, axes = (0,0))
    Y = _np.tensordot(Why, Y, axes = (0,0))
    # get hilbert transform (if not complex)
    if not _np.iscomplexobj(X):
        X = _signal.hilbert(X, axis=1)
    if not _np.iscomplexobj(Y):
        Y = _signal.hilbert(Y, axis=1)
    # get the final number of filters
    num = _np.min([num, px, py])
    # determine if correlation coefficient is maximized or minimized
    if opt == 'max': sign = -1
    else: sign = 1
    # start optimization
    for i in xrange(num):
        if i == 0:
            # get first pair of filters
            # get best parameters and function values of each run
            optres = _np.array([
                     _minimize(func = _env_corr, fprime = None,
                               x0 = _np.random.random(px+py) * 2 -1,
                               args = (X, Y, sign, log, x_ind, y_ind),
                               m=100, approx_grad=False, iprint=1)[:2]
                     for k in xrange(bestof)])
            # somehow, _minimize sometimes returns the wrong function value,
            # so this is re-determined here
            for k in xrange(bestof):
                optres[k,1] = _env_corr(
                        optres[k,0],
                        X, Y, sign, log, x_ind, y_ind)[0]
            # determine the best
            best = _np.argmin(optres[:,1])
            # save results
            corr = [sign * optres[best,1]]
            filt = optres[best,0]
        else:
            # get consecutive pairs of filters
            # project data into null space of previous filters
            # this is done by getting the right eigenvectors of the filter
            # matrix corresponding to vanishing eigenvalues
            Bx = _linalg.svd(_np.atleast_2d(filt[:px].T),
                             full_matrices=True)[2][i:].T
            Xb = _np.tensordot(Bx,X, axes=(0,0))
            By = _linalg.svd(_np.atleast_2d(filt[px:].T),
                             full_matrices=True)[2][i:].T
            Yb = _np.tensordot(By,Y, axes=(0,0))
            # get best parameters and function values of each run
            optres = _np.array([
                     _minimize(func = _env_corr, fprime = None,
                               x0 = _np.random.random(px+py-2*i) * 2 -1,
                               args = (Xb, Yb, sign, log, x_ind, y_ind),
                               m=100, approx_grad=False, iprint=1)[:2]
                               for k in xrange(bestof)])
            # somehow, _minimize sometimes returns the wrong function value,
            # so this is re-determined here
            for k in xrange(bestof):
                optres[k,1] = _env_corr(
                        optres[k,0],
                        Xb, Yb, sign, log, x_ind, y_ind)[0]
            # determine the best
            best = _np.argmin(optres[:,1])
            # save results
            corr = corr + [sign * optres[best,1]]
            filt = _np.column_stack([filt,
                _np.hstack([Bx.dot(optres[best,0][:px-i]),
                            By.dot(optres[best,0][px-i:])])
                                    ])
    # project filters back into original (un-whitened) channel space
    Wx = Whx.dot(filt[:px])
    Wy = Why.dot(filt[px:])
    #normalize filters to have unit length
    Wx = Wx / _np.sqrt(_np.sum(Wx**2, 0))
    Wy = Wy / _np.sqrt(_np.sum(Wy**2, 0))
    return _np.array(corr), Wx, Wy
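
The objective `_env_corr` is not shown; the quantity cSPoC optimizes is the correlation of the (optionally log-transformed) amplitude envelopes of the filtered signals, obtained from the analytic signal. A simplified illustration of that quantity for two fixed channels sharing a slow power modulation (no filter optimization, trials, or gradients):

import numpy as np
from scipy.signal import hilbert

rng = np.random.default_rng(2)
t = np.linspace(0.0, 10.0, 2000)
envelope = 1.0 + 0.5 * np.sin(2 * np.pi * 0.3 * t)   # shared slow power modulation
x = envelope * np.sin(2 * np.pi * 11 * t) + 0.1 * rng.standard_normal(t.size)
y = envelope * np.sin(2 * np.pi * 23 * t) + 0.1 * rng.standard_normal(t.size)

env_x = np.abs(hilbert(x))                            # amplitude envelopes
env_y = np.abs(hilbert(y))
print(np.corrcoef(np.log(env_x), np.log(env_y))[0, 1])  # high, typically > 0.9
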
Example #7
def cSPoAC(X, tau=1, opt='max', num=1, log=True, bestof=15, x_ind=None):
    """
    canonical Source Power Auto-Correlation analysis (cSPoAC)
    
    For the dataset X, find a linear filter wx, such
    that the correlation of the amplitude envelopes wx.T.dot(X[...,:-tau])
    and wx.T.dot(X[...,tau:]) is maximized, i.e. it seeks a spatial filter
    to maximize the auto-correlation of amplitude envelopes for a shift
    of tau (example for X being 2D).
    Alternatively tau can be an array of indices to X, such that
    X[...,tau[0]] and X[...,tau[1]] define the lag.

    The solution is inspired by and derived from the original cSPoC analysis.

    Reference:
    ----------
    Dahne, S., et al., Finding brain oscillations with power dependencies
    in neuroimaging data, NeuroImage (2014),
    http://dx.doi.org/10.1016/j.neuroimage.2014.03.075

    Notes:
    ------
    Dataset X can be either a 2d numpy array of shape
    (channels x datapoints) or 3d a array of shape
    (channels x datapoints x trials). For 2d array tau denotes a lag in
    the time domain, for 3d tau denotes a trial-wise lag.
    For 3d arrays the average envelope in each trial is calculated if x_ind
    is None. If it is set, the difference of the instantaneous amplitude
    at x_ind and the average envelope is calculated for each trial.

    If log == True, then the log transform is taken before the average
    inside the trial
    If X is of complex type, it is assumed that this is the analytic
    representation of X, i.e., the hilbert transform was already applied.
    
    The filters are in the columns of the filter matrices Wx and Wy,
    for 2d input the data can be filtered as:
    
    np.dot(Wx.T, X)
    
    for 3d input:
    
    np.tensordot(Wx, X, axes=(0,0))

    Input:
    ------
    -- X numpy array - the dataset of shape px x N (x tr), where px is the
                       number of sensors, N the number of data-points, tr
                       the number of trials. If X is of complex
                       type it is assumed that this already is the
                       analytic representation of X, i.e. the hilbert
                       transform already was applied.
    -- tau int or array of ints - the lag to calculate the autocorrelation,
                                  if X.ndim==2, this is a time-wise lag, if
                                  X.ndim==3, this is a trial-wise lag.
                                  Alternatively tau can be an array of ints,
                                  such that X[...,tau[0]] and X[...,tau[1]]
                                  are correlated.
    -- opt {'max', 'min', 'zero'} - determines whether the correlation coefficient
                            should be maximized - seeks for positive
                            correlations ('max', default);
                            or minimized - seeks for anti-correlations
                            ('min'), ('zero') seeks for zero correlation
    -- num int > 0 - determine the number of filters that will be derived.
                     This depends also on the rank of X, if X is 2d, the
                     number of filter pairs will be: min([num, rank(X)]).
                     If X is 3d, the array is flattened into a 2d array
                     before calculating the rank
    -- log {True, False} - compute the correlation between the log-
                           transformed envelopes, if datasets come in
                           epochs, then the log is taken before averaging
                           inside the epochs, defaults to True
    -- bestof int > 0 - the number of restarts for the optimization of the
                        individual filter pairs. The best filter over all
                        these restarts with random initializations is
                        chosen, defaults to 15.
    -- x_ind int - the time index (-X.shape[1] <= x_ind < X.shape[1]),
                   where the difference of the instantaneous envelope and
                   the average envelope is determined for X

    Output:
    -------
    corr - numpy array - the canonical correlations of the amplitude
                         envelopes for each filter
    Wx - numpy array - the filters for X, each filter is in a column of Wx
                       (if num==1: Wx is 1d)
    """
    #check input
    assert isinstance(X, _np.ndarray), "X must be numpy array"
    assert (X.ndim ==2 or X.ndim==3), "X must be 2D or 3D numpy array"
    if isinstance(tau, _np.ndarray):
        try:
            X[...,tau[0]]
            X[...,tau[1]]
        except Exception:
            raise ValueError("""
                    If tau is an array, tau[0] and tau[1] must be subarrays
                    of valid indices to X defining a certain lag, i.e.,
                    the correlation between X[...,tau[0]] and X[...,tau[1]]
                    is optimized""")
    else:
        assert isinstance(tau, int), "tau must be an integer or an array of indices"
        assert ((tau > 0) and (tau < (X.shape[-1]-1))
            ), "tau must be >0 and smaller than the last dim of X " +\
               "minus 1."
        tau = _np.array([
            _np.arange(0,X.shape[-1]-tau,1),
            _np.arange(tau, X.shape[-1],1)
            ])
    assert opt in ['max', 'min', 'zero'], "\"opt\" must be \"max\", " +\
            "\"min\" or \"zero\""
    assert isinstance(num, int), "\"num\" must be integer > 0"
    assert num > 0, "\"num\" must be integer > 0"
    assert log in [True, False, 0, 1], "\"log\" must be a boolean (True " \
                                     + "or False)"
    assert isinstance(bestof, int), "\"bestof\" must be integer > 0"
    assert bestof > 0, "\"bestof\" must be integer > 0"
    if x_ind is not None:
        assert X.ndim == 3, "If x_ind is set, X must be 3d array!"
        assert isinstance(x_ind, int), "x_ind must be integer!"
        assert ((x_ind >= -X.shape[1]) and
                (x_ind < X.shape[1])), "x_ind must match the range of " +\
                                       "X.shape[1]"
    # get whitening transformation for X
    Whx, sx = _linalg.svd(X.reshape(X.shape[0],-1).real, full_matrices=False)[:2]
    #get rank
    px = (sx > (_np.max(sx) * _np.max([X.shape[0],_np.prod(X.shape[1:])]) *
          _np.finfo(X.dtype).eps)).sum()
    Whx = Whx[:,:px] / sx[:px][_np.newaxis]
    # whiten the data
    X = _np.tensordot(Whx, X, axes = (0,0))
    # get hilbert transform
    if not _np.iscomplexobj(X):
        X = _signal.hilbert(X, axis=1)
    # get the final number of filters
    num = _np.min([num, px])
    # determine if correlation coefficient is maximized or minimized
    if opt == 'max': sign = -1
    elif opt == 'min': sign = 1
    elif opt == 'zero': sign = 0
    else: raise ValueError("\"opt\" must be \"max\", " +\
            "\"min\" or \"zero\"")
    # start optimization
    for i in xrange(num):
        if i == 0:
            # get first filter
            # get best parameters and function values of each run
            optres = _np.array([
                     _minimize(func = _env_corr_same, fprime = None,
                               x0 = _np.random.random(px) * 2 -1,
                               args = (X[...,tau[0]], X[...,tau[1]], sign, log,
                               x_ind, x_ind),
                               m=100, approx_grad=False, iprint=1)[:2]
                     for k in xrange(bestof)])
            # somehow, _minimize sometimes returns the wrong function value,
            # so this is re-determined here
            for k in xrange(bestof):
                optres[k,1] = _env_corr_same(
                        optres[k,0],
                        X[...,tau[0]], X[...,tau[1]], sign, log,
                               x_ind, x_ind)[0]
            # determine the best result
            best = _np.argmin(optres[:,1])
            # save results
            if sign != 0:
                corr = [sign * optres[best,1]]
            else:
                corr = [optres[best,1]]
            filt = optres[best,0]
        else:
            # get consecutive pairs of filters
            # project data into null space of previous filters
            # this is done by getting the right eigenvectors of the filter
            # matrix corresponding to vanishing eigenvalues
            Bx = _linalg.svd(_np.atleast_2d(filt.T),
                             full_matrices=True)[2][i:].T
            Xb = _np.tensordot(Bx,X, axes=(0,0))
            # get best parameters and function values of each run
            optres = _np.array([
                     _minimize(func = _env_corr_same, fprime = None,
                               x0 = _np.random.random(px-i) * 2 -1,
                               args = (Xb[...,tau[0]], Xb[...,tau[1]], sign, log,
                               x_ind, x_ind),
                               m=100, approx_grad=False, iprint=1)[:2]
                               for k in xrange(bestof)])
            # somehow, _minimize sometimes returns the wrong function value,
            # so this is re-determined here
            for k in xrange(bestof):
                optres[k,1] = _env_corr_same(
                        optres[k,0],
                        Xb[...,tau[0]], Xb[...,tau[1]], sign, log,
                               x_ind, x_ind)[0]
            # determine the best result
            best = _np.argmin(optres[:,1])
            # save results
            if sign != 0:
                corr = corr + [sign * optres[best,1]]
            else:
                corr = corr + [optres[best,1]]
            filt = _np.column_stack([filt, Bx.dot(optres[best,0])])
    # project filters back into original (un-whitened) channel space
    Wx = Whx.dot(filt)
    #normalize filters to have unit length
    Wx = Wx / _np.sqrt(_np.sum(Wx**2, 0))
    return _np.array(corr), Wx
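
A small sketch of how a scalar lag is turned into the pair of index arrays used above: `X[..., tau[0]]` and `X[..., tau[1]]` are the earlier and later slices whose envelope correlation is optimized. Values are arbitrary.

import numpy as np

N, lag = 8, 2
tau = np.array([np.arange(0, N - lag), np.arange(lag, N)])

X = np.arange(2 * N).reshape(2, N)        # toy 2-channel ramp
print(tau[0])                             # [0 1 2 3 4 5]
print(tau[1])                             # [2 3 4 5 6 7]
print(np.array_equal(X[..., tau[0]] + lag, X[..., tau[1]]))  # True for this ramp
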
Example #8
def PCOa(a, Y, num=1, bestof=15):
    '''
    Phase Amplitude Coupling Optimization, variant with provided amplitude

    It maximizes the length of the "mean vector" and
    returns the filter coefficients w

    Input:
    ------
    a - (1d numpy array, floats > 0) amplitudes
    Y - (2d numpy array, complex) analytic representation of signal,
        channels x datapoints
    num - (int > 0) - determines the number of filters that will be
                      derived. This depends also on the rank of Y, the final
                      number of filters will be min([num, rank(Y)]),
                      defaults to 1
    bestof (int > 0) - the number of restarts for the optimization of the
                       individual filters. The best filter over all
                       these restarts with random initializations is
                       chosen, defaults to 15.

    Output:
    -------
    vlen - numpy array - the length of the mean vector for each filter
    Wy - numpy array - the filters for Y, each filter is in a column of Wy
                       (if num==1: Wy is 1d)
    '''
    ############################
    # test all input arguments #
    ############################
    try:
        a = _np.asarray(a).astype(float)
    except Exception:
        raise TypeError('a must be iterable of floats')
    if not a.ndim == 1:
        raise ValueError('a must be 1-dimensional')
    ###
    try:
        Y = _np.asarray(Y)
    except Exception:
        raise TypeError('Y must be iterable')
    if not _np.iscomplexobj(Y):
        raise TypeError('Y must be complex valued')
    if not Y.ndim == 2:
        raise ValueError('Y must be 2d')
    if not Y.shape[1] == len(a):
        raise ValueError('Number of points in Y must match the length of a')
    ###
    if not isinstance(num, int):
        raise TypeError('num must be integer')
    if not num > 0:
        raise ValueError('num must be > 0')
    ###
    if not isinstance(bestof, int):
        raise TypeError('bestof must be integer')
    if not bestof > 0:
        raise ValueError('bestof must be > 0')
    #####################################################
    # normalize a to have zero mean and a variance of 1 #
    #####################################################
    a = (a - a.mean()) / a.std()
    ######################################
    # Whiten the (real part of the) data #
    ######################################
    Why, sy = _linalg.svd(Y.real, full_matrices=False)[:2]
    # get rank
    py = (
        sy >
        (_np.max(sy) * _np.max([Y.shape[0], _np.prod(Y.shape[1:])]) *
         _np.finfo(Y.dtype).eps)).sum()
    Why = Why[:, :py] / sy[:py][_np.newaxis]
    # whiten for the real part of the data
    Y = _np.dot(Why.T, Y)
    # get the final number of filters
    num = _np.min([num, py])
    ######################
    # start optimization #
    ######################
    for i in xrange(num):
        if i == 0:
            # get first filter
            # get best parameters and function values of each run
            optres = _np.array([
                _minimize(func=_PCOa_obj_der,
                          fprime=None,
                          x0=_np.random.random(py) * 2 - 1,
                          args=(a, Y, -1, True, False),
                          m=100,
                          approx_grad=False,
                          iprint=0)[:2] for k in xrange(bestof)
            ])
            # somehow, _minimize sometimes returns the wrong function value,
            # so this is re-determined here
            for k in xrange(bestof):
                optres[k, 1] = _PCOa_obj_der(optres[k, 0], a, Y, -1, False,
                                             False)
            # determine the best
            best = _np.argmin(optres[:, 1])
            # save results
            vlen = [optres[best, 1]]
            filt = optres[best, 0]
        else:
            # get consecutive pairs of filters
            # project data into null space of previous filters
            # this is done by getting the right eigenvectors of the filter
            # matrix corresponding to vanishing eigenvalues
            By = _linalg.svd(_np.atleast_2d(filt.T),
                             full_matrices=True)[2][i:].T
            Yb = _np.dot(By.T, Y)
            # get best parameters and function values of each run
            optres = _np.array([
                _minimize(func=_PCOa_obj_der,
                          fprime=None,
                          x0=_np.random.random(py - i) * 2 - 1,
                          args=(a, Yb, -1, True, False),
                          m=100,
                          approx_grad=False,
                          iprint=0)[:2] for k in xrange(bestof)
            ])
            # somehow, _minimize sometimes returns the wrong function value,
            # so this is re-determined here
            for k in xrange(bestof):
                optres[k, 1] = _PCOa_obj_der(optres[k, 0], a, Yb, -1, False,
                                             False)
            # determine the best
            best = _np.argmin(optres[:, 1])
            # save results
            vlen = vlen + [optres[best, 1]]
            filt = _np.column_stack([filt, By.dot(optres[best, 0])])
    # project filters back into original (un-whitened) channel space
    Wy = Why.dot(filt)
    #normalize filters to have unit length
    Wy = Wy / _np.sqrt(_np.sum(Wy**2, 0))
    return -1 * _np.array(vlen), Wy
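
The objective `_PCOa_obj_der` is not shown. Assuming the usual phase-amplitude-coupling definition, the "mean vector" whose length PCOa maximizes is the amplitude-weighted mean of unit phasors exp(i*phase) of the filtered analytic signal. A hedged sketch of that quantity; the definition and the synthetic data below are assumptions:

import numpy as np


def mean_vector_length(w, a, Y):
    """w: filter (channels,), a: normalized amplitudes (N,), Y: analytic signal (channels x N)."""
    z = w @ Y                                # filtered analytic signal
    return np.abs(np.mean(a * np.exp(1j * np.angle(z))))


rng = np.random.default_rng(3)
N = 1000
phase = np.cumsum(rng.uniform(0.05, 0.15, N))          # a wandering phase
a = 1.0 + 0.8 * np.cos(phase)                          # amplitude locked to that phase
a = (a - a.mean()) / a.std()
Y = np.vstack([np.exp(1j * phase),
               rng.standard_normal(N) + 1j * rng.standard_normal(N)])
print(mean_vector_length(np.array([1.0, 0.0]), a, Y))  # large: channel 0 is phase-coupled
print(mean_vector_length(np.array([0.0, 1.0]), a, Y))  # near zero: channel 1 is noise
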
Example #9
    def do_fit(xdata,
               ydata,
               fit='standard',
               fit_parameters_dict=None,
               one_freq_dict=None,
               weights=None):
        xdata = _np.asarray(xdata) - 1  #discount Clifford-inverse
        ydata = _np.asarray(ydata)
        if weights is None:
            weights = _np.array([1.] * len(xdata))
        if one_freq_dict is None:

            def obj_func_full(params):
                A, Bs, f = params
                return _np.sum((A + (Bs - A) * f**xdata - ydata)**2 / weights)

            def obj_func_1st_order_full(params):
                A1, B1s, C1, f1 = params
                return _np.sum(
                    (A1 +
                     (B1s - A1 + C1 * xdata) * f1**xdata - ydata)**2 / weights)
        else:
            xdata_correction = _np.array(one_freq_dict['m_list']) - 1
            n_0_list = one_freq_dict['n_0_list']
            N_list = one_freq_dict['N_list']
            K_list = one_freq_dict['K_list']
            ydata_correction = []
            weights_correction = []
            indices_to_delete = []
            for m in xdata_correction:
                for i, j in enumerate(xdata):
                    if j == m:
                        ydata_correction.append(ydata[i])
                        weights_correction.append(weights[i])
                        #                        indices_to_delete.append([i])
                        indices_to_delete.append(i)
            ydata_correction = _np.array(ydata_correction)
            xdata = _np.delete(xdata, indices_to_delete)
            ydata = _np.delete(ydata, indices_to_delete)
            weights = _np.delete(weights, indices_to_delete)
            #            print(one_freq_dict)
            #            print('xdata:')
            #            print(xdata)
            #            print('ydata:')
            #            print(ydata)
            #            print('weights:')
            #            print(weights)
            case_1_dict = {}
            case_3_dict = {}
            for m, n_0, N, K in zip(xdata_correction, n_0_list, N_list,
                                    K_list):
                case_1_dict[m, n_0, N, K] = (n_0 - 1)**(n_0 - 1) * (N - n_0)**(
                    N - n_0) / ((N - 1)**(N - 1))
                case_3_dict[m, n_0, N,
                            K] = (n_0**n_0 * (N - n_0 - 1)**(N - n_0 - 1)) / (
                                (N - 1)**(N - 1))

            def obj_func_full(params):
                A, Bs, f = params
                ###                print("A="+str(A))
                ###                print("Bs="+str(Bs))
                ###                print("f="+str(f))
                if (((A < 0 or A > 1) or (Bs < 0 or Bs > 1))
                        or (f <= 0 or f >= 1)):
                    return 1e6
                total_0 = _np.sum(
                    (A + (Bs - A) * f**xdata - ydata)**2 / weights)
                #                correction_1 = _np.sum((A+(Bs-A)*f**xdata_correction - ydata_correction)**2  / weights_correction)
                correction_2 = 0
                for m, n_0, N, K in zip(xdata_correction, n_0_list, N_list,
                                        K_list):
                    #                    print("type(A)="+str(type(A)))
                    #                    print("type(Bs)="+str(type(Bs)))
                    #                    print("type(f)="+str(type(f)))
                    #                    print("type(m)="+str(type(m)))
                    #                    print("type(n_0)="+str(type(n_0)))
                    #                    print("type(N)="+str(type(N)))
                    #                    print("type(K)="+str(type(K)))
                    F = A + (Bs - A) * f**m
                    #                    print("A="+str(A))
                    #                    print("Bs="+str(Bs))
                    #                    print("f="+str(f))
                    #                    print("m="+str(m))
                    try:
                        assert len(F) == 1
                        F = F[0]
                    except:
                        pass
#                    print("F="+str(F))
#                    print("K="+str(K))
#                    print("n_0="+str(n_0))
#                    print("N="+str(N))
                    if F < (n_0 - 1) / (N - 1):
                        ###                        print('Case 1 m='+str(m))
                        ###                        print('F='+str(F))
                        ###                        print(case_1_dict[m,n_0,N,K])
                        #                        print("type(F)="+str(type(F)))
                        #                        print("F="+str(F))
                        #                        print("K="+str(K))
                        #                        print("n_0="+str(n_0))
                        #                        print("N="+str(N))
                        #                        correction_2 += K * _np.log(F * (n_0-1)**(n_0-1)*(N-n_0)**(N-n_0) / ((N-1)**(N-1)))#+1e-10)
                        correction_2 += K * _np.log(
                            F * case_1_dict[m, n_0, N, K])  #+1e-10
##                        correction_2 += (F * (n_0-1)**(n_0-1)*(N-n_0)**(N-n_0) / ((N-1)**(N-1)))**K
                    elif F >= (n_0 - 1) / (N - 1) and F < n_0 / (N - 1):
                        ###                        print('Case 2 m='+str(m))
                        ###                        print('F='+str(F))
                        ###                        print('1-F='+str(1-F))
                        ###                        print(F**n_0 * (1-F)**(N-n_0))
                        #                        print(case_1_dict[m,n_0,N,K])
                        #                        print("F="+str(F))
                        #                        print("K="+str(K))
                        #                        print("n_0="+str(n_0))
                        #                        print("N="+str(N))
                        correction_2 += K * _np.log(F**n_0 *
                                                    (1 - F)**(N - n_0))
##                        correction_2 += (F**n_0 * (1-F)**(N-n_0))**K
                    elif F >= n_0 / (N - 1):
                        ###                        print('Case 3 m='+str(m))
                        ###                        print('1-F='+str(1-F))
                        ###                        print(case_3_dict[m,n_0,N,K])
                        correction_2 += K * _np.log(
                            (1 - F) * case_3_dict[m, n_0, N, K])  #+1e-10)
#                        correction_2 += K * _np.log((1-F) * (n_0**n_0 * (N-n_0-1)**(N-n_0-1))/((N-1)**(N-1)))#+1e-10)
##                        correction_2 += ((1-F) * (n_0**n_0 * (N-n_0-1)**(N-n_0-1))/((N-1)**(N-1)))**K
                    else:
                        #print((n_0 - 1) / (N - 1))
                        #print(n_0 / (N-1))
                        #print("F="+str(F))
                        #print("A="+str(A))
                        #print("Bs="+str(Bs))
                        #print("f="+str(f))
                        #print("m="+str(m))
                        raise ValueError(
                            "F does not fall within any physical bounds for m="
                            + str(m) + "!")
#                return total_0 - correction_1 + correction_2
                return total_0 - correction_2

            def obj_func_1st_order_full(params):
                #                print("1st order call")
                A1, B1s, C1, f1 = params
                total_0 = _np.sum(
                    (A1 +
                     (B1s - A1 + C1 * xdata) * f1**xdata - ydata)**2 / weights)
                #                correction_1 = _np.sum((A1+(B1s-A1+C1*xdata_correction)*f1**xdata_correction-ydata_correction)**2 / weights)
                correction_2 = 0
                for m, n_0, N, K in zip(xdata_correction, n_0_list, N_list,
                                        K_list):
                    #                    print("type(A)="+str(type(A)))
                    #                    print("type(Bs)="+str(type(Bs)))
                    #                    print("type(f)="+str(type(f)))
                    #                    print("type(m)="+str(type(m)))
                    #                    print("type(n_0)="+str(type(n_0)))
                    #                    print("type(N)="+str(type(N)))
                    #                    print("type(K)="+str(type(K)))
                    F = A1 + (B1s - A1 + C1 * m) * f1**m
                    #                    print("A="+str(A))
                    #                    print("Bs="+str(Bs))
                    #                    print("f="+str(f))
                    #                    print("m="+str(m))
                    try:
                        assert len(F) == 1
                        F = F[0]
                    except:
                        pass
#                    print("F="+str(F))
#                    print("K="+str(K))
#                    print("n_0="+str(n_0))
#                    print("N="+str(N))
                    if F < (n_0 - 1) / (N - 1):
                        ###                        print('Case 1 m='+str(m))
                        ###                        print('F='+str(F))
                        ###                        print(case_1_dict[m,n_0,N,K])
                        #                        print("type(F)="+str(type(F)))
                        #                        print("F="+str(F))
                        #                        print("K="+str(K))
                        #                        print("n_0="+str(n_0))
                        #                        print("N="+str(N))
                        #                        correction_2 += K * _np.log(F * (n_0-1)**(n_0-1)*(N-n_0)**(N-n_0) / ((N-1)**(N-1)))#+1e-10)
                        correction_2 += K * _np.log(
                            F * case_1_dict[m, n_0, N, K])  #+1e-10
##                        correction_2 += (F * (n_0-1)**(n_0-1)*(N-n_0)**(N-n_0) / ((N-1)**(N-1)))**K
                    elif F >= (n_0 - 1) / (N - 1) and F < n_0 / (N - 1):
                        ###                        print('Case 2 m='+str(m))
                        ###                        print('F='+str(F))
                        ###                        print('1-F='+str(1-F))
                        ###                        print(F**n_0 * (1-F)**(N-n_0))
                        #                        print(case_1_dict[m,n_0,N,K])
                        #                        print("F="+str(F))
                        #                        print("K="+str(K))
                        #                        print("n_0="+str(n_0))
                        #                        print("N="+str(N))
                        correction_2 += K * _np.log(F**n_0 *
                                                    (1 - F)**(N - n_0))
##                        correction_2 += (F**n_0 * (1-F)**(N-n_0))**K
                    elif F >= n_0 / (N - 1):
                        ###                        print('Case 3 m='+str(m))
                        ###                        print('1-F='+str(1-F))
                        ###                        print(case_3_dict[m,n_0,N,K])
                        correction_2 += K * _np.log(
                            (1 - F) * case_3_dict[m, n_0, N, K])  #+1e-10)
#                        correction_2 += K * _np.log((1-F) * (n_0**n_0 * (N-n_0-1)**(N-n_0-1))/((N-1)**(N-1)))#+1e-10)
##                        correction_2 += ((1-F) * (n_0**n_0 * (N-n_0-1)**(N-n_0-1))/((N-1)**(N-1)))**K
                    else:
                        #print((n_0 - 1) / (N - 1))
                        #print(n_0 / (N-1))
                        #print("F="+str(F))
                        #print("A="+str(A))
                        #print("Bs="+str(Bs))
                        #print("f="+str(f))
                        #print("m="+str(m))
                        raise ValueError(
                            "F does not fall within any physical bounds for m="
                            + str(m) + "!")


#                return total_0 - correction_1 + correction_2
                return total_0 - correction_2

        def obj_func_1d(f):
            A = 0.5
            Bs = 1.
            return obj_func_full([A, Bs, f])

        if fit_parameters_dict is None:
            fit_parameters_dict = {}
        if fit in ('standard', 'first order'):
            fit_parameters_dict.setdefault('f0', 0.99)
            fit_parameters_dict.setdefault('f_bnd', [0., 1.])
            fit_parameters_dict.setdefault('A0', 0.5)
            fit_parameters_dict.setdefault('A_bnd', [None, None])
            fit_parameters_dict.setdefault('ApB0', 1.0)
            fit_parameters_dict.setdefault('ApB_bnd', [None, None])
        if fit == 'first order':
            fit_parameters_dict.setdefault('C0', 0.0)
            fit_parameters_dict.setdefault('C_bnd', [None, None])

        if fit in ('standard', 'first order'):
            f0 = fit_parameters_dict['f0']
            initial_soln = _minimize(obj_func_1d,
                                     f0,
                                     method='L-BFGS-B',
                                     bounds=[(0., 1.)])
            f0b = initial_soln.x[0]
            A0 = fit_parameters_dict['A0']
            ApB0 = fit_parameters_dict['ApB0']
            f_bnd = fit_parameters_dict['f_bnd']
            A_bnd = fit_parameters_dict['A_bnd']
            ApB_bnd = fit_parameters_dict['ApB_bnd']

            p0 = [A0, ApB0, f0b]
            final_soln_standard = _minimize(obj_func_full,
                                            p0,
                                            method='L-BFGS-B',
                                            bounds=[A_bnd, ApB_bnd, f_bnd])
            A, Bs, f = final_soln_standard.x
            results_dict = {
                'A': A,
                'B': Bs - A,
                'f': f,
                'r': _rbutils.p_to_r(f, dim)
            }
            if fit == 'first order':
                C0 = fit_parameters_dict['C0']
                C_bnd = fit_parameters_dict['C_bnd']
                p0 = [A, Bs, C0, f]
                final_soln_1storder = _minimize(
                    obj_func_1st_order_full,
                    p0,
                    method='L-BFGS-B',
                    bounds=[A_bnd, ApB_bnd, C_bnd, f_bnd])

                A, Bs, C, f = final_soln_1storder.x
                results_dict = {
                    'A': A,
                    'B': Bs - A,
                    'C': C,
                    'f': f,
                    'r': _rbutils.p_to_r(f, dim)
                }

        return results_dict
Example #10
0
    def maximize_loglike(self, method='SLSQP', **kwargs):
        return _minimize(self.negative_loglike,
                         self.parameter_values(),
                         jac=self.negative_d_loglike,
                         method=method,
                         **kwargs)
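
A minimal, self-contained sketch of the same pattern with plain scipy.optimize.minimize, assuming a stand-alone objective instead of the model object above; passing the analytic gradient through `jac` keeps the optimizer from falling back on finite differences.

import numpy as np
from scipy.optimize import minimize

# hypothetical quadratic negative log likelihood with a known gradient
def negative_loglike(x):
    return 0.5 * np.sum((x - 1.0) ** 2)

def negative_d_loglike(x):
    return x - 1.0

result = minimize(negative_loglike,
                  np.zeros(3),            # starting parameter values
                  jac=negative_d_loglike,
                  method='SLSQP')
print(result.x)  # close to [1., 1., 1.]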
Example #11
0
    def fit(xdata, ydata, one_freq_dict=None, weights=None):
        xdata = _np.asarray(xdata) - 1  #discount Clifford-inverse
        ydata = _np.asarray(ydata)
        if weights is None:
            weights = _np.array([1.] * len(xdata))
        if one_freq_dict is None:

            def obj_func_full(params):
                A, Bs, f = params
                return _np.sum((A + (Bs - A) * f**xdata - ydata)**2 / weights)

            def obj_func_1st_order_full(params):
                A1, B1s, C1, f1 = params
                return _np.sum(
                    (A1 +
                     (B1s - A1 + C1 * xdata) * f1**xdata - ydata)**2 / weights)
        else:
            xdata_correction = _np.array(one_freq_dict['m_list']) - 1
            n_0_list = one_freq_dict['n_0_list']
            N_list = one_freq_dict['N_list']
            K_list = one_freq_dict['K_list']
            ydata_correction = []
            weights_correction = []
            indices_to_delete = []
            for m in xdata_correction:
                for i, j in enumerate(xdata):
                    if j == m:
                        ydata_correction.append(ydata[i])
                        weights_correction.append(weights[i])
                        indices_to_delete.append(i)
            ydata_correction = _np.array(ydata_correction)
            xdata = _np.delete(xdata, indices_to_delete)
            ydata = _np.delete(ydata, indices_to_delete)
            weights = _np.delete(weights, indices_to_delete)
            case_1_dict = {}
            case_3_dict = {}
            for m, n_0, N, K in zip(xdata_correction, n_0_list, N_list,
                                    K_list):
                case_1_dict[m, n_0, N, K] = (n_0 - 1)**(n_0 - 1) * (N - n_0)**(
                    N - n_0) / ((N - 1)**(N - 1))
                case_3_dict[m, n_0, N,
                            K] = (n_0**n_0 * (N - n_0 - 1)**(N - n_0 - 1)) / (
                                (N - 1)**(N - 1))

            def obj_func_full(params):
                A, Bs, f = params
                # penalize unphysical parameter values
                if (((A < 0 or A > 1) or (Bs < 0 or Bs > 1))
                        or (f <= 0 or f >= 1)):
                    return 1e6
                total_0 = _np.sum(
                    (A + (Bs - A) * f**xdata - ydata)**2 / weights)
                correction_2 = 0
                for m, n_0, N, K in zip(xdata_correction, n_0_list, N_list,
                                        K_list):
                    # model prediction of the survival probability at length m
                    F = A + (Bs - A) * f**m
                    # unwrap a length-1 array into a scalar
                    if _np.ndim(F) == 1 and len(F) == 1:
                        F = F[0]
                    if F < (n_0 - 1) / (N - 1):
                        correction_2 += K * _np.log(
                            F * case_1_dict[m, n_0, N, K])
                    elif (n_0 - 1) / (N - 1) <= F < n_0 / (N - 1):
                        correction_2 += K * _np.log(F**n_0 *
                                                    (1 - F)**(N - n_0))
                    elif F >= n_0 / (N - 1):
                        correction_2 += K * _np.log(
                            (1 - F) * case_3_dict[m, n_0, N, K])
                    else:
                        raise ValueError(
                            "F={} does not fall within any physical bounds "
                            "for m={}!".format(F, m))
                return total_0 - correction_2

            def obj_func_1st_order_full(params):
                A1, B1s, C1, f1 = params
                total_0 = _np.sum(
                    (A1 +
                     (B1s - A1 + C1 * xdata) * f1**xdata - ydata)**2 / weights)
                correction_2 = 0
                for m, n_0, N, K in zip(xdata_correction, n_0_list, N_list,
                                        K_list):
                    # first-order model prediction of the survival probability
                    # at length m
                    F = A1 + (B1s - A1 + C1 * m) * f1**m
                    # unwrap a length-1 array into a scalar
                    if _np.ndim(F) == 1 and len(F) == 1:
                        F = F[0]
                    if F < (n_0 - 1) / (N - 1):
                        correction_2 += K * _np.log(
                            F * case_1_dict[m, n_0, N, K])
                    elif (n_0 - 1) / (N - 1) <= F < n_0 / (N - 1):
                        correction_2 += K * _np.log(F**n_0 *
                                                    (1 - F)**(N - n_0))
                    elif F >= n_0 / (N - 1):
                        correction_2 += K * _np.log(
                            (1 - F) * case_3_dict[m, n_0, N, K])
                    else:
                        raise ValueError(
                            "F={} does not fall within any physical bounds "
                            "for m={}!".format(F, m))
                return total_0 - correction_2

        def obj_func_1d(f):
            A = 0.5
            Bs = 1.
            return obj_func_full([A, Bs, f])

        # seed values and bounds for the fits (assumed defaults, mirroring the
        # companion fit routine above, since this snippet references them
        # without defining them)
        f0 = 0.99
        A0 = 0.5
        ApB0 = 1.0
        C0 = 0.0
        f_bnd = [0., 1.]
        A_bnd = [None, None]
        ApB_bnd = [None, None]
        C_bnd = [None, None]

        initial_soln = _minimize(obj_func_1d,
                                 f0,
                                 method='L-BFGS-B',
                                 bounds=[(0., 1.)])
        f0b = initial_soln.x[0]
        p0 = [A0, ApB0, f0b]
        final_soln = _minimize(obj_func_full,
                               p0,
                               method='L-BFGS-B',
                               bounds=[A_bnd, ApB_bnd, f_bnd])
        A, Bs, f = final_soln.x

        p0 = [A, Bs, C0, f]
        final_soln_1storder = _minimize(obj_func_1st_order_full,
                                        p0,
                                        method='L-BFGS-B',
                                        bounds=[A_bnd, ApB_bnd, C_bnd, f_bnd])

        A1, B1s, C1, f1 = final_soln_1storder.x

        #        if A > 0.6 or A1 > 0.6 or A < 0.4 or A1 < 0.4:
        #            print("Warning: Asymptotic fit parameter is not within [0.4,0.6].")

        #        if C1 > 0.1 or C1 < -0.1:
        #            print("Warning: Additional parameter in first order fit is significantly non-zero")

        return {
            'A': A,
            'B': Bs - A,
            'f': f,
            'F_avg': _rbutils.f_to_F_avg(f, dim),
            'r': _rbutils.f_to_r(f, dim),
            'A1': A1,
            'B1': B1s - A1,
            'C1': C1,
            'f1': f1,
            'F_avg1': _rbutils.f_to_F_avg(f1, dim),
            'r1': _rbutils.f_to_r(f1, dim)
        }
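
A minimal, runnable sketch of the two-stage fit performed above, assuming no one_freq_dict correction: synthetic survival probabilities are drawn from the standard decay model A + (Bs - A) * f**m and recovered by least squares (here with unit weights); the conversion of f to an error rate is omitted.

import numpy as np
from scipy.optimize import minimize

rng = np.random.default_rng(0)
m = np.arange(1, 200, 10)                      # sequence lengths
A_true, Bs_true, f_true = 0.5, 1.0, 0.98
y = A_true + (Bs_true - A_true) * f_true**m + rng.normal(0., 0.005, m.size)

def obj(params):
    A, Bs, f = params
    return np.sum((A + (Bs - A) * f**m - y) ** 2)

# stage 1: fit f alone with A and Bs pinned at their ideal values
f_seed = minimize(lambda p: obj([0.5, 1.0, p[0]]), x0=[0.99],
                  method='L-BFGS-B', bounds=[(0., 1.)]).x[0]

# stage 2: fit all three parameters, seeded by the stage-1 result
res = minimize(obj, x0=[0.5, 1.0, f_seed], method='L-BFGS-B',
               bounds=[(None, None), (None, None), (0., 1.)])
A_fit, Bs_fit, f_fit = res.x
print(A_fit, Bs_fit - A_fit, f_fit)            # roughly 0.5, 0.5, 0.98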
Example #12
0
File: _cSPoC.py Project: CoastSunny/meet
def cSPoAvgC(X, opt='max', num=1, log=True, bestof=15):
    """
    canonical Source Power Average Correlation analysis (cSPoAvgC)
    
    For the dataset X, find a linear filter wx such
    that the average correlation of the amplitude envelopes of wx.T.dot(X)
    and (wx.T.dot(X)).mean(-1) is maximized, i.e. it seeks a spatial filter
    that maximizes the correlation of the amplitude envelopes and their average.

    The solution is inspired by and derived from the original cSPoC analysis.

    Reference:
    ----------
    Dahne, S., et al., Finding brain oscillations with power dependencies
    in neuroimaging data, NeuroImage (2014),
    http://dx.doi.org/10.1016/j.neuroimage.2014.03.075

    Notes:
    ------
    Dataset X must be a 3d array of shape
    (channels x datapoints x trials).
    
    If log == True, then the log transform is taken before the average
    inside the trial
    
    If X is of complex type, it is assumed that this is the analytic
    representation of X, i.e., the Hilbert transform was applied before.
    
    The filters are in the columns of the filter matrix Wx.
    
    The input data can be filtered as:    
    
    np.tensordot(Wx, X, axes=(0,0))

    Input:
    ------
    -- X numpy array - the dataset of shape px x N x tr, where px is the
                       number of sensors, N the number of data-points, tr
                       the number of trials. If X is of complex
                       type it is assumed that this already is the
                       analytic representation of X, i.e. the hilbert
                       transform already was applied.
    -- opt {'max', 'min', 'zero'} - determines whether the correlation coefficient
                            should be maximized - seeks for positive
                            correlations ('max', default);
                            or minimized - seeks for anti-correlations
                            ('min'), ('zero') seeks for zero correlation
    -- num int > 0 - determine the number of filters that will be derived.
                     This depends also on the rank of X, if X is 2d, the
                     number of filter pairs will be: min([num, rank(X)]).
                     If X is 3d, the array is flattened into a 2d array
                     before calculating the rank
    -- log {True, False} - compute the correlation between the log-
                           transformed envelopes, if datasets come in
                           epochs, then the log is taken before averaging
                           inside the epochs, defaults to True
    -- bestof int > 0 - the number of restarts for the optimization of the
                        individual filter pairs. The best filter over all
                        these restarts with random initializations is
                        chosen, defaults to 15.
    Output:
    -------
    corr - numpy array - the canonical correlations of the amplitude
                         envelopes for each filter
    Wx - numpy array - the filters for X, each filter is in a column of Wx
                       (if num==1: Wx is 1d)
    """
    #check input
    assert isinstance(X, _np.ndarray), "X must be numpy array"
    assert (X.ndim == 3), "X must be 3D numpy array"
    assert opt in ['max', 'min', 'zero'], "\"opt\" must be \"max\", " +\
            "\"min\" or \"zero\""
    assert isinstance(num, int), "\"num\" must be integer > 0"
    assert num > 0, "\"num\" must be integer > 0"
    assert log in [True, False, 0, 1], "\"log\" must be a boolean (True " \
                                     + "or False)"
    assert isinstance(bestof, int), "\"bestof\" must be integer > 0"
    assert bestof > 0, "\"bestof\" must be integer > 0"
    # get whitening transformation for X
    Whx, sx = _linalg.svd(X.reshape(X.shape[0], -1).real,
                          full_matrices=False)[:2]
    #get rank
    px = (
        sx >
        (_np.max(sx) * _np.max([X.shape[0], _np.prod(X.shape[1:])]) *
         _np.finfo(X.dtype).eps)).sum()
    Whx = Whx[:, :px] / sx[:px][_np.newaxis]
    # whiten the data
    X = _np.tensordot(Whx, X, axes=(0, 0))
    # get hilbert transform
    if not _np.iscomplexobj(X):
        X = _signal.hilbert(X, axis=1)
    # get the final number of filters
    num = _np.min([num, px])
    # determine if correlation coefficient is maximized or minimized
    if opt == 'max': sign = -1
    elif opt == 'min': sign = 1
    elif opt == 'zero': sign = 0
    else: raise ValueError("\"opt\" must be \"max\", " +\
            "\"min\" or \"zero\"")
    # start optimization
    for i in xrange(num):
        if i == 0:
            # get first filter
            # get best parameters and function values of each run
            optres = _np.array([
                _minimize(func=_env_corr_avg,
                          fprime=None,
                          x0=_np.random.random(px) * 2 - 1,
                          args=(X, sign, log),
                          m=100,
                          approx_grad=False,
                          iprint=1)[:2] for k in xrange(bestof)
            ])
            # somehow, _minimize sometimes returns the wrong function value,
            # so it is re-determined here
            for k in xrange(bestof):
                optres[k, 1] = _env_corr_avg(optres[k, 0], X, sign, log)[0]
            # determine the best_result
            best = _np.argmin(optres[:, 1])
            # save results
            if sign != 0:
                corr = [sign * optres[best, 1]]
            else:
                corr = [optres[best, 1]]
            filt = optres[best, 0]
        else:
            # get consecutive pairs of filters
            # project data into null space of previous filters
            # this is done by getting the right eigenvectors of the filter
            # matrix corresponding to vanishing eigenvalues
            Bx = _linalg.svd(_np.atleast_2d(filt.T),
                             full_matrices=True)[2][i:].T
            Xb = _np.tensordot(Bx, X, axes=(0, 0))
            # get best parameters and function values of each run
            optres = _np.array([
                _minimize(func=_env_corr_avg,
                          fprime=None,
                          x0=_np.random.random(px - i) * 2 - 1,
                          args=(Xb, sign, log),
                          m=100,
                          approx_grad=False,
                          iprint=1)[:2] for k in xrange(bestof)
            ])
            # somehow, _minimize sometimes returns the wrong function value,
            # so it is re-determined here
            for k in xrange(bestof):
                optres[k, 1] = _env_corr_avg(optres[k, 0], Xb, sign, log)[0]
            # determine the best result
            best = _np.argmin(optres[:, 1])
            # save results
            if sign != 0:
                corr = corr + [sign * optres[best, 1]]
            else:
                corr = corr + [optres[best, 1]]
            filt = _np.column_stack([filt, Bx.dot(optres[best, 0])])
    # project filters back into original (un-whitened) channel space
    Wx = Whx.dot(filt)
    #normalize filters to have unit length
    Wx = Wx / _np.sqrt(_np.sum(Wx**2, 0))
    return _np.array(corr), Wx
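
A small stand-alone sketch of the SVD-based whitening and rank step that opens this routine (and the two below), using random data as a stand-in for a real channels x datapoints x trials array; Whx maps the channels onto px decorrelated components, and np.tensordot applies it across trials just as above.

import numpy as np

rng = np.random.default_rng(0)
X = rng.standard_normal((8, 200, 30))       # channels x datapoints x trials

# SVD of the flattened (channels x everything-else) data matrix
U, s = np.linalg.svd(X.reshape(X.shape[0], -1), full_matrices=False)[:2]

# numerical rank, with the same tolerance rule used above
tol = s.max() * max(X.shape[0], int(np.prod(X.shape[1:]))) * np.finfo(X.dtype).eps
px = int((s > tol).sum())

# whitening transform and whitened data
Whx = U[:, :px] / s[:px][np.newaxis]
Xw = np.tensordot(Whx, X, axes=(0, 0))      # px x datapoints x trials
print(px, Xw.shape)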
Example #13
0
File: _cSPoC.py Project: CoastSunny/meet
def cSPoC(X, Y, opt='max', num=1, log=True, bestof=15, x_ind=None, y_ind=None):
    """
    canonical Source Power Correlation analysis (cSPoC)
    
    For the datasets X and Y, find a pair of linear filters wx and wy, such
    that the correlation of the amplitude envelopes wx.T.dot(X) and
    wy.T.dot(Y) is maximized.

    Reference:
    ----------
    Dahne, S., et al., Finding brain oscillations with power dependencies
    in neuroimaging data, NeuroImage (2014),
    http://dx.doi.org/10.1016/j.neuroimage.2014.03.075

    Notes:
    ------
    Datasets X and Y can be either 2d numpy arrays of shape
    (channels x datapoints) or 3d array of shape
    (channels x datapoints x trials).
    For 3d arrays the average envelope in each trial is calculated if x_ind
    (or y_ind, respectively) is None. If they are set, the difference of
    the instantaneous amplitude envelope at x_ind/y_ind and the average
    envelope is calculated for each trial.
    If log == True, then the log transform is taken before the average
    inside the trial

    If X and/or Y are of complex type, it is assumed that these are the
    analytic representations of X and Y, i.e., the hilbert transform was
    applied before.
    
    The filters are in the columns of the filter matrices Wx and Wy,
    for 2d input the data can be filtered as:
    
    np.dot(Wx.T, X)
    
    for 3d input:
    
    np.tensordot(Wx, X, axes=(0,0))

    Input:
    ------
    -- X numpy array - the first dataset of shape px x N (x tr), where px
                       is the number of sensors, N the number of data-
                       points, tr the number of trials. If X is of complex
                       type it is assumed that this already is the
                       analytic representation of X, i.e. the hilbert
                       transform already was applied.
    -- Y is the second dataset of shape py x N (x tr)
    -- opt {'max', 'min'} - determines whether the correlation coefficient
                            should be maximized - seeks for positive
                            correlations ('max', default);
                            or minimized - seeks for anti-correlations
                            ('min')
    -- num int > 0 - determine the number of filter-pairs that will be
                     derived. This depends also on the ranks of X and Y,
                     if X and Y are 2d the number of filter pairs will be:
                     min([num, rank(X), rank(Y)]). If X and/or Y are 3d the
                     array is flattened into a 2d array before calculating
                     the rank
    -- log {True, False} - compute the correlation between the log-
                           transformed envelopes, if datasets come in
                           epochs, then the log is taken before averaging
                           inside the epochs, defaults to True
    -- bestof int > 0 - the number of restarts for the optimization of the
                        individual filter pairs. The best filter over all
                        these restarts with random initializations is
                        chosen, defaults to 15.
    -- x_ind int - the time index (-X.shape[1] <= x_ind < X.shape[1]) where
                   the difference of the instantaneous envelope and the
                   average envelope is determined for X
    -- y_ind int - the time index (-Y.shape[1] <= y_ind < Y.shape[1]) where
                   the difference of the instantaneous envelope and the
                   average envelope is determined for Y

    Output:
    -------
    corr - numpy array - the canonical correlations of the amplitude
                         envelopes for each filter
    Wx - numpy array - the filters for X, each filter is in a column of Wx
                       (if num==1: Wx is 1d)
    Wy - numpy array - the filters for Y, each filter is in a column of Wy
                       (if num==1: Wy is 1d)
    """
    #check input
    assert isinstance(X, _np.ndarray), "X must be numpy array"
    assert (X.ndim == 2 or X.ndim == 3), "X must be 2D or 3D numpy array"
    assert isinstance(Y, _np.ndarray), "Y must be numpy array"
    assert (Y.ndim == 2 or Y.ndim == 3), "Y must be 2D or 3D numpy array"
    assert X.shape[-1] == Y.shape[-1], "Size of last dimension in X and" \
                                     + " Y must be equal"
    assert opt in ['max', 'min'], "\"opt\" must be \"max\" or \"min\""
    assert isinstance(num, int), "\"num\" must be integer > 0"
    assert num > 0, "\"num\" must be integer > 0"
    assert log in [True, False, 0, 1], "\"log\" must be a boolean (True " \
                                     + "or False)"
    assert isinstance(bestof, int), "\"bestof\" must be integer > 0"
    assert bestof > 0, "\"bestof\" must be integer > 0"
    if x_ind is not None:
        assert X.ndim == 3, "If x_ind is set, X must be 3d array!"
        assert isinstance(x_ind, int), "x_ind must be integer!"
        assert ((x_ind >= -X.shape[1]) and
                (x_ind < X.shape[1])), "x_ind must match the range of " +\
                                       "X.shape[1]"
    if y_ind is not None:
        assert Y.ndim == 3, "If y_ind is set, Y must be 3d array!"
        assert isinstance(y_ind, int), "y_ind must be integer!"
        assert ((y_ind >= -Y.shape[1]) and
                (y_ind < Y.shape[1])), "y_ind must match the range of " +\
                                       "Y.shape[1]"
    # get whitening transformations
    # for X
    Whx, sx = _linalg.svd(X.reshape(X.shape[0], -1).real,
                          full_matrices=False)[:2]
    #get rank
    px = (
        sx >
        (_np.max(sx) * _np.max([X.shape[0], _np.prod(X.shape[1:])]) *
         _np.finfo(X.dtype).eps)).sum()
    Whx = Whx[:, :px] / sx[:px][_np.newaxis]
    # for Y
    Why, sy = _linalg.svd(Y.reshape(Y.shape[0], -1).real,
                          full_matrices=False)[:2]
    # get rank
    py = (
        sy >
        (_np.max(sy) * _np.max([Y.shape[0], _np.prod(Y.shape[1:])]) *
         _np.finfo(Y.dtype).eps)).sum()
    Why = Why[:, :py] / sy[:py][_np.newaxis]
    # whiten the data
    X = _np.tensordot(Whx, X, axes=(0, 0))
    Y = _np.tensordot(Why, Y, axes=(0, 0))
    # get hilbert transform (if not complex)
    if not _np.iscomplexobj(X):
        X = _signal.hilbert(X, axis=1)
    if not _np.iscomplexobj(Y):
        Y = _signal.hilbert(Y, axis=1)
    # get the final number of filters
    num = _np.min([num, px, py])
    # determine if correlation coefficient is maximized or minimized
    if opt == 'max': sign = -1
    else: sign = 1
    # start optimization
    for i in xrange(num):
        if i == 0:
            # get first pair of filters
            # get best parameters and function values of each run
            optres = _np.array([
                _minimize(func=_env_corr,
                          fprime=None,
                          x0=_np.random.random(px + py) * 2 - 1,
                          args=(X, Y, sign, log, x_ind, y_ind),
                          m=100,
                          approx_grad=False,
                          iprint=1)[:2] for k in xrange(bestof)
            ])
            # somehow, _minimize sometimes returns the wrong function value,
            # so it is re-determined here
            for k in xrange(bestof):
                optres[k, 1] = _env_corr(optres[k, 0], X, Y, sign, log, x_ind,
                                         y_ind)[0]
            # determine the best
            best = _np.argmin(optres[:, 1])
            # save results
            corr = [sign * optres[best, 1]]
            filt = optres[best, 0]
        else:
            # get consecutive pairs of filters
            # project data into null space of previous filters
            # this is done by getting the right eigenvectors of the filter
            # matrix corresponding to vanishing eigenvalues
            Bx = _linalg.svd(_np.atleast_2d(filt[:px].T),
                             full_matrices=True)[2][i:].T
            Xb = _np.tensordot(Bx, X, axes=(0, 0))
            By = _linalg.svd(_np.atleast_2d(filt[px:].T),
                             full_matrices=True)[2][i:].T
            Yb = _np.tensordot(By, Y, axes=(0, 0))
            # get best parameters and function values of each run
            optres = _np.array([
                _minimize(func=_env_corr,
                          fprime=None,
                          x0=_np.random.random(px + py - 2 * i) * 2 - 1,
                          args=(Xb, Yb, sign, log, x_ind, y_ind),
                          m=100,
                          approx_grad=False,
                          iprint=1)[:2] for k in xrange(bestof)
            ])
            # somehow, _minimize sometimes returns the wrong function value,
            # so it is re-determined here
            for k in xrange(bestof):
                optres[k, 1] = _env_corr(optres[k, 0], Xb, Yb, sign, log,
                                         x_ind, y_ind)[0]
            # determine the best
            best = _np.argmin(optres[:, 1])
            # save results
            corr = corr + [sign * optres[best, 1]]
            filt = _np.column_stack([
                filt,
                _np.hstack([
                    Bx.dot(optres[best, 0][:px - i]),
                    By.dot(optres[best, 0][px - i:])
                ])
            ])
    # project filters back into original (un-whitened) channel space
    Wx = Whx.dot(filt[:px])
    Wy = Why.dot(filt[px:])
    #normalize filters to have unit length
    Wx = Wx / _np.sqrt(_np.sum(Wx**2, 0))
    Wy = Wy / _np.sqrt(_np.sum(Wy**2, 0))
    return _np.array(corr), Wx, Wy
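
A hedged post-processing sketch for the 3d case: random stand-ins are used for the filters Wx and Wy that cSPoC would return (num == 1, so they are 1d), the data are filtered with np.tensordot exactly as the docstring describes, and the quantity the method optimizes, the correlation of trial-averaged (log-)amplitude envelopes, is computed by hand.

import numpy as np
from scipy.signal import hilbert

rng = np.random.default_rng(0)
X = rng.standard_normal((10, 300, 20))      # channels x datapoints x trials
Y = rng.standard_normal((6, 300, 20))

# stand-ins for the filters the function above would return
Wx = rng.standard_normal(10)
Wy = rng.standard_normal(6)

# filter the 3d data (num == 1, so the filters are 1d)
x_comp = np.tensordot(Wx, X, axes=(0, 0))   # datapoints x trials
y_comp = np.tensordot(Wy, Y, axes=(0, 0))

# amplitude envelopes of the analytic signals
x_env = np.abs(hilbert(x_comp, axis=0))
y_env = np.abs(hilbert(y_comp, axis=0))

# correlation of trial-averaged log-envelopes (the quantity cSPoC optimizes)
r = np.corrcoef(np.log(x_env).mean(axis=0), np.log(y_env).mean(axis=0))[0, 1]
print(r)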
Example #14
0
File: _cSPoC.py Project: CoastSunny/meet
def cSPoAC(X, tau=1, opt='max', num=1, log=True, bestof=15, x_ind=None):
    """
    canonical Source Power Auto-Correlation analysis (cSPoAC)
    
    For the dataset X, find a linear filter wx such
    that the correlation of the amplitude envelopes wx.T.dot(X[...,:-tau])
    and wx.T.dot(X[...,tau:]) is maximized, i.e. it seeks a spatial filter
    to maximize the auto-correlation of amplitude envelopes for a shift
    of tau (example for X being 2D).
    Alternatively tau can be an array of indices to X, such that
    X[...,tau[0]] and X[...,tau[1]] define the lag.

    The solution is inspired by and derived from the original cSPoC analysis.

    Reference:
    ----------
    Dahne, S., et al., Finding brain oscillations with power dependencies
    in neuroimaging data, NeuroImage (2014),
    http://dx.doi.org/10.1016/j.neuroimage.2014.03.075

    Notes:
    ------
    Dataset X can be either a 2d numpy array of shape
    (channels x datapoints) or a 3d array of shape
    (channels x datapoints x trials). For 2d array tau denotes a lag in
    the time domain, for 3d tau denotes a trial-wise lag.
    For 3d arrays the average envelope in each trial is calculated if x_ind
    is None. If it is set, the difference of the instantaneous amplitude
    at x_ind and the average envelope is calculated for each trial.

    If log == True, then the log transform is taken before the average
    inside the trial
    If X is of complex type, it is assumed that this is the analytic
    representation of X, i.e., the Hilbert transform was applied before.
    
    The filters are in the columns of the filter matrix Wx,
    for 2d input the data can be filtered as:
    
    np.dot(Wx.T, X)
    
    for 3d input:
    
    np.tensordot(Wx, X, axes=(0,0))

    Input:
    ------
    -- X numpy array - the dataset of shape px x N (x tr), where px is the
                       number of sensors, N the number of data-points, tr
                       the number of trials. If X is of complex
                       type it is assumed that this already is the
                       analytic representation of X, i.e. the hilbert
                       transform already was applied.
    -- tau int or array of ints - the lag to calculate the autocorrelation,
                                  if X.ndim==2, this is a time-wise lag, if
                                  X.ndim==3, this is a trial-wise lag.
                                  Alternatively tau can be an array of ints,
                                  such that X[...,tau[0]] and X[...,tau[1]]
                                  are correlated.
    -- opt {'max', 'min', 'zero'} - determines whether the correlation coefficient
                            should be maximized - seeks for positive
                            correlations ('max', default);
                            or minimized - seeks for anti-correlations
                            ('min'), ('zero') seeks for zero correlation
    -- num int > 0 - determine the number of filters that will be derived.
                     This depends also on the rank of X, if X is 2d, the
                     number of filter pairs will be: min([num, rank(X)]).
                     If X is 3d, the array is flattened into a 2d array
                     before calculating the rank
    -- log {True, False} - compute the correlation between the log-
                           transformed envelopes, if datasets come in
                           epochs, then the log is taken before averaging
                           inside the epochs, defaults to True
    -- bestof int > 0 - the number of restarts for the optimization of the
                        individual filter pairs. The best filter over all
                        these restarts with random initializations is
                        chosen, defaults to 15.
    -- x_ind int - the time index (-X.shape[1] <= x_ind < X.shape[1]),
                   where the difference of the instantaneous envelope and
                   the average envelope is determined for X

    Output:
    -------
    corr - numpy array - the canonical correlations of the amplitude
                         envelopes for each filter
    Wx - numpy array - the filters for X, each filter is in a column of Wx
                       (if num==1: Wx is 1d)
    """
    #check input
    assert isinstance(X, _np.ndarray), "X must be numpy array"
    assert (X.ndim == 2 or X.ndim == 3), "X must be 2D or 3D numpy array"
    if isinstance(tau, _np.ndarray):
        try:
            X[..., tau[0]]
            X[..., tau[1]]
        except IndexError:
            raise ValueError("""
                    If tau is an array, tau[0] and tau[1] must be subarrays
                    of valid indices to X defining a certain lag, i.e.,
                    the correlation between X[...,tau[0]] and X[...,tau[1]]
                    is optimized""")
    else:
        assert isinstance(tau, int), "tau must be an integer or an array " +\
                "of integer indices"
        assert ((tau > 0) and (tau < (X.shape[-1] - 1))
            ), "tau must be >0 and smaller than the last dim of X " +\
               "minus 1."
        tau = _np.array([
            _np.arange(0, X.shape[-1] - tau, 1),
            _np.arange(tau, X.shape[-1], 1)
        ])
    assert opt in ['max', 'min', 'zero'], "\"opt\" must be \"max\", " +\
            "\"min\" or \"zero\""
    assert isinstance(num, int), "\"num\" must be integer > 0"
    assert num > 0, "\"num\" must be integer > 0"
    assert log in [True, False, 0, 1], "\"log\" must be a boolean (True " \
                                     + "or False)"
    assert isinstance(bestof, int), "\"bestof\" must be integer > 0"
    assert bestof > 0, "\"bestof\" must be integer > 0"
    if x_ind is not None:
        assert X.ndim == 3, "If x_ind is set, X must be 3d array!"
        assert isinstance(x_ind, int), "x_ind must be integer!"
        assert ((x_ind >= -X.shape[1]) and
                (x_ind < X.shape[1])), "x_ind must match the range of " +\
                                       "X.shape[1]"
    # get whitening transformation for X
    Whx, sx = _linalg.svd(X.reshape(X.shape[0], -1).real,
                          full_matrices=False)[:2]
    #get rank
    px = (
        sx >
        (_np.max(sx) * _np.max([X.shape[0], _np.prod(X.shape[1:])]) *
         _np.finfo(X.dtype).eps)).sum()
    Whx = Whx[:, :px] / sx[:px][_np.newaxis]
    # whiten the data
    X = _np.tensordot(Whx, X, axes=(0, 0))
    # get hilbert transform
    if not _np.iscomplexobj(X):
        X = _signal.hilbert(X, axis=1)
    # get the final number of filters
    num = _np.min([num, px])
    # determine if correlation coefficient is maximized or minimized
    if opt == 'max': sign = -1
    elif opt == 'min': sign = 1
    elif opt == 'zero': sign = 0
    else: raise ValueError("\"opt\" must be \"max\", " +\
            "\"min\" or \"zero\"")
    # start optimization
    for i in xrange(num):
        if i == 0:
            # get first filter
            # get best parameters and function values of each run
            optres = _np.array([
                _minimize(func=_env_corr_same,
                          fprime=None,
                          x0=_np.random.random(px) * 2 - 1,
                          args=(X[...,
                                  tau[0]], X[...,
                                             tau[1]], sign, log, x_ind, x_ind),
                          m=100,
                          approx_grad=False,
                          iprint=1)[:2] for k in xrange(bestof)
            ])
            # somehow, _minimize sometimes returns the wrong function value,
            # so it is re-determined here
            for k in xrange(bestof):
                optres[k, 1] = _env_corr_same(optres[k, 0], X[..., tau[0]],
                                              X[..., tau[1]], sign, log, x_ind,
                                              x_ind)[0]
            # determine the best_result
            best = _np.argmin(optres[:, 1])
            # save results
            if sign != 0:
                corr = [sign * optres[best, 1]]
            else:
                corr = [optres[best, 1]]
            filt = optres[best, 0]
        else:
            # get consecutive pairs of filters
            # project data into null space of previous filters
            # this is done by getting the right eigenvectors of the filter
            # matrix corresponding to vanishing eigenvalues
            Bx = _linalg.svd(_np.atleast_2d(filt.T),
                             full_matrices=True)[2][i:].T
            Xb = _np.tensordot(Bx, X, axes=(0, 0))
            # get best parameters and function values of each run
            optres = _np.array([
                _minimize(func=_env_corr_same,
                          fprime=None,
                          x0=_np.random.random(px - i) * 2 - 1,
                          args=(Xb[..., tau[0]], Xb[..., tau[1]], sign, log,
                                x_ind, x_ind),
                          m=100,
                          approx_grad=False,
                          iprint=1)[:2] for k in xrange(bestof)
            ])
            # somehow, _minimize sometimes returns the wrong function value,
            # so it is re-determined here
            for k in xrange(bestof):
                optres[k, 1] = _env_corr_same(optres[k, 0], Xb[..., tau[0]],
                                              Xb[..., tau[1]], sign, log,
                                              x_ind, x_ind)[0]
            # determine the best result
            best = _np.argmin(optres[:, 1])
            # save results
            if sign != 0:
                corr = corr + [sign * optres[best, 1]]
            else:
                corr = corr + [optres[best, 1]]
            filt = _np.column_stack([filt, Bx.dot(optres[best, 0])])
    # project filters back into original (un-whitened) channel space
    Wx = Whx.dot(filt)
    #normalize filters to have unit length
    Wx = Wx / _np.sqrt(_np.sum(Wx**2, 0))
    return _np.array(corr), Wx
Example #15
0
def maximize_loglike(model, *arg, ctol=1e-6, options={}, metaoptions=None, two_stage_constraints=False, pre_bhhh=0, cache_data=False, sessionlog=False, sourcecode=None, stash='generic'):
	"""
	Maximize the log likelihood of the model.
	
	Parameters
	----------
	arg : optimizers
	ctol : float
		The global convergence tolerance
	options : dict
		Options to pass to the outer minimizer
	metaoptions :
		Options to pass to the inner minimizers
	two_stage_constraints : bool
	pre_bhhh : int
		How many BHHH steps should be attempted before switching to the other optimizers.
		No convergence checks are made, and no bounds or constraints are enforced, on these
		simple pre-steps, but they can be useful to warm-start other algorithms (esp. SLSQP)
		that do enforce these things but perform badly from poor starting points.
	"""
	
	if sourcecode is not None:
		if isinstance(sourcecode, str):
			model.new_xhtml_sourcecode(sourcecode, frame_offset=1)
		else:
			model.new_xhtml_sourcecode('sourcecode', frame_offset=1)
	
	
	if sessionlog:
		if isinstance(sessionlog, int) and sessionlog>1:
			model.session_log(loglevel=sessionlog)
		else:
			model.session_log()
	
	if metaoptions is not None:
		options['options'] = metaoptions
	stat = runstats()
	if not model.Data_UtilityCE_manual.active():
		stat.start_process('setup')
		model.tearDown()
		if not model.is_provisioned() and model._ref_to_db is not None:
			model.provision(idca_avail_ratio_floor = model.option.idca_avail_ratio_floor, cache=cache_data)
		model.setUp(False, cache=cache_data)

	if pre_bhhh:
		stat.start_process('pre_bhhh')
		while pre_bhhh>0:
			try:
				model._bhhh_simple_step()
			except RuntimeError:
				pre_bhhh = 0
			pre_bhhh -= 1

	from ...metamodel import MetaModel
	if isinstance(model, MetaModel):
		stat.start_process('setup_meta')
		model.setUp()
	
	try:
		model.df.cache_alternatives()
	except:
		pass

	x0 = model.parameter_values()
	if model.option.calc_null_likelihood:
		stat.start_process('null_likelihood')
		llnull = model.loglike_null()
		model._LL_null = float(llnull)

	if model.option.weight_choice_rebalance:
		stat.start_process("weight choice rebalance")
		if model.weight_choice_rebalance():
			stat.write("rebalanced weights and choices")

	if model.option.weight_autorescale:
		stat.start_process("weight autorescale")
		stat.write(model.auto_rescale_weights())
	
	stat.end_process()
	try:
		use_cobyla = model.use_cobyla
	except AttributeError:
		use_cobyla = False
	if use_cobyla:
		constraints = model._build_constraints(include_bounds=True)
	else:
		constraints = model._build_constraints()

	model._built_constraints_cache = constraints

	bounds=None
	if model.option.enforce_bounds and not use_cobyla:
		bounds=model.parameter_bounds()

	if two_stage_constraints:
		if len(arg):
			ot = model.optimizers(*arg, ctol=ctol)
		else:
			ot = model.optimizers(*_default_optimizers(model), ctol=ctol)
		r0 = _minimize(lambda z: 0.123999, x0, method=ot, options=options, bounds=None, constraints=() )
		print(model)
		if bounds or constraints:
			ctol = None
		if len(arg):
			ot = model.optimizers(*arg, ctol=ctol)
		else:
			ot = model.optimizers(*_default_optimizers(model), ctol=ctol)
		r = _minimize(lambda z: 0.123999, x0, method=ot, options=options, bounds=bounds, constraints=constraints )
		r.prepend(r0)
		r.stats.prepend_timing(stat)
	else:
		if bounds or constraints:
			if "BHHH" not in arg:
				ctol = None
		if len(arg):
			ot = model.optimizers(*arg, ctol=ctol)
		else:
			ot = model.optimizers(*_default_optimizers(model), ctol=ctol)
		r = _minimize(lambda z: 0.123999, x0, method=ot, options=options, bounds=bounds, constraints=constraints )
		r.stats.prepend_timing(stat)
	
#	try:
#		r_message = r.intermediate[-1].message
#	except:
#		r_message = ""
#	if r_message == 'Positive directional derivative for linesearch' and len(constraints) and model.option.enforce_constraints:
#		model.option.enforce_constraints = False
#		r0 = r
#		r1 = _minimize(lambda z: 0.123999, x0, method=ot, options=options, bounds=bounds, constraints=() )
#		model.option.enforce_constraints = True
#		r = _minimize(lambda z: 0.123999, x0, method=ot, options=options, bounds=bounds, constraints=constraints )

	if model.logger():
		model.logger().log(30,"Preliminary Results\n{!s}".format(model.art_params().ascii()))

	ll = model.loglike()

	if model.option.weight_autorescale and model.get_weight_scale_factor() != 1.0:
		r.stats.start_process("weight unrescale")
		model.restore_scale_weights()
		model.clear_cache()
		ll = model.loglike(cached=False)

	if model.option.calc_std_errors:
		r.stats.start_process("parameter covariance")
		if len(constraints) == 0:
			model.calculate_parameter_covariance()
			holdfasts = model.parameter_holdfast_array
		else:
			holdfasts = model._compute_constrained_covariance(constraints=constraints)
		from ...linalg import possible_overspecification
		overspec = possible_overspecification(model.hessian_matrix, holdfasts)
		if overspec:
			r.stats.write("WARNING: Model is possibly over-specified (hessian is nearly singular).")
			r.possible_overspecification = []
			for eigval, ox, eigenvec in overspec:
				if eigval=='LinAlgError':
					r.possible_overspecification.append( (eigval, [ox,], ["",]) )
				else:
					paramset = list(numpy.asarray(model.parameter_names())[ox])
					r.possible_overspecification.append( (eigval, paramset, eigenvec[ox]) )
			model.possible_overspecification = r.possible_overspecification

	r.stats.start_process("cleanup")
	r.stats.number_threads = model.option.threads
	ll = float(ll)
	model._LL_best = ll
	model._LL_current = ll
	r.loglike = ll
	if model.option.calc_null_likelihood:
		r.loglike_null = llnull
	r.stats.end_process()
	# peak memory usage
	from ..sysinfo import get_peak_memory_usage
	r.peak_memory_usage = get_peak_memory_usage()
	# installed memory
	try:
		import psutil
	except ImportError:
		pass
	else:
		mem = psutil.virtual_memory().total
		if mem >= 2.0*2**30:
			mem_size = str(mem/2**30) + " GiB"
		else:
			mem_size = str(mem/2**20) + " MiB"
		r.installed_memory = mem_size
	# save
	model._set_estimation_run_statistics_pickle(r.stats.pickled_dictionary())
	model.maximize_loglike_results = r
	del model._built_constraints_cache
	if model.logger():
		model.logger().log(30,"Final Results\n{!s}".format(model.art_params().ascii()))

	try:
		model.df.uncache_alternatives()
	except:
		pass

	try:
		r_success = r.success
	except AttributeError:
		r_success = False

	if not r_success:
		warnings.warn("Model.maximize_loglike did not succeed normally, you might try Model.doctor() to see if there are any identified problems", stacklevel=2)

	try:
		specific_warning_note = model._specific_warning_notes()
	except AttributeError:
		pass
	else:
		if specific_warning_note:
			r.stats.write(specific_warning_note)

	if stash:
		model.stash_parameters(ticket=stash)

	model._display_finalized_status(r)

	return r
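
A generic sketch of the warm-start idea behind pre_bhhh, using plain scipy in place of the model's BHHH steps (which are not shown here): a handful of cheap, unconstrained iterations move the starting point into a reasonable region before SLSQP is run with the real bounds.

import numpy as np
from scipy.optimize import minimize

# hypothetical negative log likelihood (Rosenbrock used as a stand-in)
def negloglike(x):
    return 100.0 * (x[1] - x[0] ** 2) ** 2 + (1.0 - x[0]) ** 2

x0 = np.array([-1.5, 2.0])

# stage 1: a few cheap, unconstrained steps with loose stopping criteria
warm = minimize(negloglike, x0, method='Nelder-Mead',
                options={'maxiter': 50, 'xatol': 1e-2, 'fatol': 1e-2})

# stage 2: the real optimization, with bounds enforced, from the warmed point
res = minimize(negloglike, warm.x, method='SLSQP',
               bounds=[(-2.0, 2.0), (-2.0, 2.0)])
print(res.x, res.fun)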
Example #16
0
File: quantizer.py Project: knttnk/NQLib
    def design_GB(system: "IdealSystem",
                  *,
                  q: StaticQuantizer,
                  dim: int,
                  T: int = None,  # TODO: reflect the parameters below this
                  gain_wv: float = inf,
                  verbose: bool = False,
                  method: str = "SLSQP") -> Tuple["DynamicQuantizer", float]:
        """
        Finds the stable and optimal dynamic quantizer `Q` for `system`.
        Returns `(Q, E)`. `E` is the estimation of E(Q)[1]_,[2]_,[3]_.

        If NQLib couldn't find `Q` such that
        ```
        all([
            Q.N == dim,
            Q.gain_wv() < gain_wv,
            Q.is_stable,
        ])
        ```
        becomes `True`, this method returns `(None, inf)`.

        Parameters
        ----------
        system : IdealSystem
            Must be stable and SISO.
        q : StaticQuantizer
            Returned dynamic quantizer contains this static quantizer.
            `q.delta` is important to estimate E(Q).
        dim : int
            Upper limit of order of `Q`. Must be greater than `0`.
        T : int, None or numpy.inf, optional
            Estimation time. Must be greater than `0`.
            (The default is `None`, which means infinity).
        gain_wv : float, optional
            Upper limit of gain w->v . Must be greater than `0`.
            (The default is `numpy.inf`).
        verbose : bool, optional
            Whether to print the details.
            (The default is `False`).
        method : str, optional  TODO: remove
            Specifies which method should be used in
            `scipy.optimize.minimize()`.
            (The default is `"SLSQP"`.)

        Returns
        -------
        (Q, E) : Tuple[DynamicQuantizer, float]
            `Q` is the stable and optimal dynamic quantizer for `system`.
            `E` is estimation of E(Q).

        Raises
        ------
        ValueError
            If `system` is unstable.

        References
        ----------
        .. [5] Y. Minami and T. Muromaki: Differential evolution-based
           synthesis of dynamic quantizers with fixed-structures; International
           Journal of Computational Intelligence and Applications, Vol. 15,
           No. 2, 1650008 (2016)
        """  # TODO: ドキュメント更新
        # if T is None:
        #     # TODO: support infinity evaluation time
        #     return None, inf
        if not isinstance(dim, int):
            raise TypeError("`dim` must be `numpy.inf` or an instance of `int`.")
        elif dim < 1:
            raise ValueError("`dim` must be greater than `0`.")
        # TODO: check that the system is SISO

        # functions to calculate E from
        # x = [an, ..., a1, cn, ..., c1]  (n = dim)
        def a(x):
            """
            a = [an, ..., a1]
            """
            return x[:dim]

        def c(x):
            """
            c = [cn, ..., c1]
            """
            return x[dim:]

        def _Q(x):
            # controllable canonical form
            _A = block([
                [zeros((dim - 1, 1)), eye(dim - 1)],
                [-a(x)],
            ])
            _B = block([
                [zeros((dim - 1, 1))],
                [1],
            ])
            _C = c(x)
            return DynamicQuantizer(
                A=_A,
                B=_B,
                C=_C,
                q=q,
            )

        def obj(x):
            return _Q(x)._objective_function(system,
                                             T=T,
                                             gain_wv=gain_wv)

        # optimize
        if verbose:
            print("Designing a quantizer with gradient-based optimization.")
            print(f"The optimization method is '{method}'.")
            print("### Message from `scipy.optimize.minimize()`. ###")
        result = _minimize(obj,
                           x0=zeros(2 * dim),
                           tol=0,
                           options={
                               "disp": verbose,
                               'maxiter': 10000,
                               'ftol': 1e-10,
                           },
                           method=method)
        if verbose:
            print(result.message)
            print("### End of message from `scipy.optimize.minimize()`. ###")

        if result.success and obj(result.x) <= 0:
            Q = _Q(result.x)
            E = system.E(Q)
            if verbose:
                print("Optimization succeeded.")
                print(f"E = {E}")
        else:
            Q = None
            E = inf
            if verbose:
                print("Optimization failed.")

        return Q, E
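
A minimal numeric sketch of the controllable canonical form assembled in _Q above, with a hypothetical decision vector x = [a_n, ..., a_1, c_n, ..., c_1] and dim = 3; it only shows how the A, B and C matrices are built, not the quantizer design itself.

import numpy as np

dim = 3
x = np.array([0.1, -0.2, 0.3,    # a_n, ..., a_1 (hypothetical values)
              1.0, 0.5, -0.5])   # c_n, ..., c_1

a = x[:dim]
c = x[dim:]

# controllable canonical form: companion matrix A, unit input vector B
A = np.block([
    [np.zeros((dim - 1, 1)), np.eye(dim - 1)],
    [-a],
])
B = np.vstack([np.zeros((dim - 1, 1)), [[1.0]]])
C = c

print(A)
print(B)
print(C)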