Example #1
    def _calculate(self, nominal_ys, ps):
        res = nominal_ys - ps
        pct = (15.87, 84.13)

        a, b = array([percentile(ri, pct) for ri in res.T]).T
        a, b = nabs(a), nabs(b)
        return (a + b) * 0.5
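The 15.87th and 84.13th percentiles bracket the central 68.3% of a distribution, so averaging their absolute values yields a robust per-column one-sigma estimate of the residuals. A minimal standalone sketch of the same computation in plain NumPy (the function name and test data are illustrative, not from the original class):

import numpy as np

def robust_sigma(nominal_ys, ps):
    res = nominal_ys - ps                   # residuals, shape (N, M)
    lo, hi = np.percentile(res, (15.87, 84.13), axis=0)
    return (np.abs(lo) + np.abs(hi)) * 0.5  # half-width of the central 68.3% band

rng = np.random.default_rng(0)
print(robust_sigma(rng.normal(0.0, 2.0, (10000, 3)), 0.0))  # roughly [2. 2. 2.]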
Example #2
def calculateFrequency(d11, z):
    # z is a 1-based row index; compare the four consecutive rows ending at row z
    colSize = d11.shape[1]
    diff = nzeros([3, colSize], dtype=float)
    diff[0, :] = nabs(nround(d11[int(z - 3.) - 1] - d11[int(z - 2.) - 1]))
    diff[1, :] = nabs(nround(d11[int(z - 2.) - 1] - d11[int(z - 1.) - 1]))
    diff[2, :] = nabs(nround(d11[int(z - 1.) - 1] - d11[int(z) - 1]))
    # per-column mode of the three rounded successive differences
    w = scp.stats.mode(diff)
    return w
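A hedged usage sketch of the same idea with plain NumPy/SciPy (assuming the aliases above map nabs/nround/nzeros to numpy.abs/round/zeros and scp to scipy): take four consecutive rows, round the successive differences, and report the per-column mode.

import numpy as np
import scipy.stats

d11 = np.arange(32, dtype=float).reshape(8, 4)       # consecutive rows differ by 4.0
diffs = np.abs(np.round(np.diff(d11[-4:], axis=0)))  # three successive row differences
print(scipy.stats.mode(diffs, axis=0))               # mode is 4.0 in every column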
Example #3
def correct_knee(thigh_w,
                 shank_w,
                 thigh_r,
                 shank_r,
                 R_shank_thigh,
                 knee_axis_kwargs=None):
    """
    Correct the knee position based on the computed knee axis.

    Parameters
    ----------
    thigh_w : numpy.ndarray
        (N, 3) array of angular velocities measured by the thigh sensor.
    shank_w : numpy.ndarray
        (N, 3) array of angular velocities measured by the shank sensor.
    thigh_r : numpy.ndarray
        Initial knee joint center to thigh sensor vector.
    shank_r : numpy.ndarray
        Initial knee joint center to shank sensor vector.
    R_shank_thigh : numpy.ndarray
        (3, 3) Rotation matrix from shank to thigh sensors.
    knee_axis_kwargs : dict, optional
        Knee axis computation key-word arguments. See KneeAxis.

    Returns
    -------
    thigh_r_corr : numpy.ndarray
        Corrected knee joint center to thigh sensor vector.
    shank_r_corr : numpy.ndarray
        Corrected knee joint center to shank sensor vector.
    """
    if knee_axis_kwargs is None:
        knee_axis_kwargs = {}
    # compute the knee axis
    ka = KneeAxis(**knee_axis_kwargs)
    thigh_j, shank_j = ka.compute(thigh_w, shank_w)

    # check the sign of the major component of the axes when rotated into the same frame
    shank_j_thigh = R_shank_thigh @ shank_j
    if sign(shank_j_thigh[argmax(nabs(shank_j_thigh))]) != sign(thigh_j[argmax(
            nabs(thigh_j))]):
        shank_j *= -1

    # compute the corrections for the joint centers
    tmp = (sum(thigh_r * thigh_j) + sum(shank_r * shank_j)) / 2
    thigh_r_corr = thigh_r - thigh_j * tmp
    shank_r_corr = shank_r - shank_j * tmp

    return thigh_r_corr, shank_r_corr
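The sign check is the subtle step: the two estimated joint axes are compared in a common frame, and one is flipped when their dominant components disagree. A self-contained sketch of just that step (the axes and rotation below are invented for illustration):

import numpy as np

thigh_j = np.array([0.05, 0.99, 0.10])     # knee axis estimated at the thigh
shank_j = np.array([-0.02, -0.97, -0.20])  # knee axis estimated at the shank
R_shank_thigh = np.eye(3)                  # shank-to-thigh rotation

shank_j_thigh = R_shank_thigh @ shank_j
if np.sign(shank_j_thigh[np.argmax(np.abs(shank_j_thigh))]) != \
        np.sign(thigh_j[np.argmax(np.abs(thigh_j))]):
    shank_j = -shank_j                     # make both axes point the same way
print(shank_j)                             # [0.02 0.97 0.2 ]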
Example #4
def energy_min(scores):
    # get known score fractions.
    normed = scores/((scores + scores.T) + (scores == 0))

    M = diag((normed*normed).sum(axis=0)) - (normed*normed.T)

    return nabs(svd(M)[-1][-1])
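A hedged usage sketch, assuming it sits in the same module as the definition above with diag, svd, and nabs bound to the numpy / numpy.linalg functions: pass a matrix of pairwise scores and get back the absolute components of the last right-singular vector of M.

import numpy as np
from numpy import diag
from numpy.linalg import svd
nabs = np.abs

scores = np.array([[0.0, 3.0],
                   [1.0, 0.0]])  # pairwise win counts between two items
print(energy_min(scores))        # abs of the last right-singular vector of M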
Example #5
    def p_values(self, t_statistic):

        # -------------------------------------------------------------------
        # p-values from Student's t distribution as implemented in SciPy.
        # More efficient in execution because the underlying algorithms
        # are implemented in Fortran.
        # -------------------------------------------------------------------

        return array(2 * (1 - t.cdf(nabs(t_statistic), (self.n - self.k - 1))))
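The return line is the standard two-sided p-value for a t statistic with n - k - 1 degrees of freedom. A standalone sketch (the n and k values are illustrative):

import numpy as np
from scipy.stats import t

t_statistic = np.array([0.5, 2.1, 3.3])
dof = 40 - 2 - 1                           # n = 40 observations, k = 2 regressors
p = 2 * (1 - t.cdf(np.abs(t_statistic), dof))
print(p)                                   # larger |t| gives smaller p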
Example #6
def FFT_MultiFrequency_update(s1, s2):
    Fs = 31.  # sampling frequency
    T = 1. / Fs  # sample time
    L = 512  # length of signal
    t = narange(0., L) * T

    NFFT1 = int(pow(2, pyNextPow2(L)))  # next power of 2 from length of y
    s1[0, :] = s1[0, :] - nmean(s1[0, :])  # remove the DC component
    s2[0, :] = s2[0, :] - nmean(s2[0, :])
    X1 = nabs(np.fft.fft(s1, NFFT1) / L)  # two-sided amplitude spectra
    Y1 = nabs(np.fft.fft(s2, NFFT1) / L)
    f1 = Fs / 2. * np.linspace(0., 1., NFFT1 // 2 + 1)  # single-sided frequency axis

    [g1, d1, d2] = checkValidity_MultiFrquency(X1, Y1, f1, NFFT1)
    return [d1, X1, Y1, f1, NFFT1, d2, g1]
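A minimal single-sided amplitude spectrum in plain NumPy, mirroring the MATLAB-style steps above (the sampling rate, signal length, and test tone are illustrative):

import numpy as np

Fs, L = 31.0, 512
t = np.arange(L) / Fs
s = np.sin(2 * np.pi * 5.0 * t)                # 5 Hz test tone
NFFT = int(2 ** np.ceil(np.log2(L)))           # next power of 2
X = np.abs(np.fft.fft(s - s.mean(), NFFT) / L)
f = Fs / 2 * np.linspace(0, 1, NFFT // 2 + 1)
print(f[np.argmax(X[:NFFT // 2 + 1])])         # close to 5.0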
Example #7
    def __gamma_ratio(self, x, y):

        module = nabs(nmax((x, y)))

        if module <= 100.0:
            return self.__gamma(x) / self.__gamma(y)
        else:
            return (power(2, x - y) *
                    self.__gamma_ratio(x * 0.5, y * 0.5) *
                    self.__gamma_ratio(x * 0.5 + 0.5, y * 0.5 + 0.5))
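The recursion is the Legendre duplication formula, gamma(x)/gamma(y) = 2**(x-y) * g(x/2)/g(y/2) * g(x/2+1/2)/g(y/2+1/2), which keeps the intermediate gammas small when x or y is large. A stable log-domain reference to check it against:

import numpy as np
from scipy.special import gammaln

x, y = 350.0, 347.5
# gamma(350) overflows float64, but the log-domain ratio is well behaved
print(np.exp(gammaln(x) - gammaln(y)))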
Example #8
def genSampling(pdf, nitn, tol):
    pdf[pdf > 1] = 1
    K = np.sum(pdf.flatten())

    minIntr = 1e99
    minIntrVec = zeros(pdf.shape)
    stat = np.zeros(nitn, )
    for n in np.arange(0, nitn):
        tmp = zeros(pdf.shape)
        while abs(np.sum(tmp.flatten()) - K) > tol:
            tmp = rand(*pdf.shape) < pdf

        TMP = ifft2(tmp / pdf)
        if nmax(nabs(TMP.flatten()[1:])) < minIntr:
            minIntr = nmax(nabs(TMP.flatten()[1:]))
            minIntrVec = tmp
        stat[n] = nmax(nabs(TMP.flatten()[1:]))

    actpctg = np.sum(minIntrVec.flatten()) / float(minIntrVec.size)
    mask = minIntrVec
    return mask, stat, actpctg
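Hedged usage sketch, assuming the function above is pasted into a module where its aliases resolve (zeros/nmax/nabs from numpy, rand from numpy.random, ifft2 from numpy.fft): draw a few candidate masks from a radial density and keep the one with the flattest point-spread function.

import numpy as np
from numpy.random import rand
from numpy.fft import ifft2
zeros, nmax, nabs = np.zeros, np.max, np.abs

yy, xx = np.mgrid[0:64, 0:64]
pdf = np.clip(1 - np.hypot(xx - 32, yy - 32) / 45, 0.05, 1.0)
mask, stat, actpctg = genSampling(pdf, nitn=5, tol=60)
print(mask.shape, actpctg)    # achieved sampling fraction, close to pdf.mean()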
Example #9
def genPDF(imSize, p, pctg, distType=2, radius=0, seed=0):
    minval = 0
    maxval = 1
    val = 0.5

    if len(imSize) == 1:
        imSize = [imSize[0], 1]
    sx = imSize[0]
    sy = imSize[1]
    PCTG = np.floor(pctg * sx * sy)
    if not np.any(np.asarray(imSize) == 1):
        x, y = np.meshgrid(np.linspace(-1, 1, sy), np.linspace(-1, 1, sx))
        if distType == 1:
            r = np.fmax(nabs(x), nabs(y))
        else:
            r = sqrt(x**2 + y**2)
            r = r / nmax(nabs(r.flatten()))
    else:
        r = nabs(np.linspace(-1, 1, max(sx, sy)))

    idx = np.where(r < radius)
    pdf = (1 - r)**p
    pdf[idx] = 1
    if np.floor(sum(pdf.flatten())) > PCTG:
        raise ValueError('infeasible without undersampling dc, increase p')

    # begin bisection
    while 1:
        val = minval / 2.0 + maxval / 2.0
        pdf = (1 - r)**p + val
        pdf[pdf > 1] = 1
        pdf[idx] = 1
        N = np.floor(sum(pdf.flatten()))
        if N > PCTG:
            maxval = val
        if N < PCTG:
            minval = val
        if N == PCTG:
            break
    return pdf
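Hedged usage sketch (same module as genPDF, with nabs/nmax/sqrt assumed to be the numpy functions): a radially decaying density on a 64x64 grid, fully sampled inside radius 0.1 and targeting 33% sampling. The result can be fed directly to genSampling above.

import numpy as np

pdf = genPDF([64, 64], p=3, pctg=0.33, distType=2, radius=0.1)
print(pdf.shape, float(pdf.max()), int(np.floor(pdf.sum())))
# (64, 64) 1.0 1351, i.e. floor(0.33 * 64 * 64)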
Example #10
    def get_state(self, ind):
        """
        Check whether or not a time point is in steady state.

        Parameters
        ----------
        ind : int
            Index of data to check

        Returns
        -------
        steady_state : bool
            Whether or not the sensor is in a steady state.
        """
        if abs(self.a_mag[ind] - self.gravity) > self.acc_thresh:
            return False
        elif (nabs(self.w[ind] - self.w[ind - 1]) > self.delta_w_thresh).any():
            return False
        elif (nabs(self.w[ind] - self.w_bias) > self.w_thresh).any():
            return False
        else:
            return True
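A hedged sketch of the three checks on synthetic values (the thresholds are invented, not the class defaults): acceleration magnitude close to gravity, a small change in angular velocity between samples, and angular velocity close to its bias.

import numpy as np

a_mag, gravity, acc_thresh = 9.90, 9.81, 0.25
w = np.array([[0.01, 0.02, 0.00],
              [0.01, 0.03, 0.01]])
w_bias, w_thresh, delta_w_thresh = np.zeros(3), 0.1, 0.05
ind = 1

steady = (abs(a_mag - gravity) <= acc_thresh
          and not (np.abs(w[ind] - w[ind - 1]) > delta_w_thresh).any()
          and not (np.abs(w[ind] - w_bias) > w_thresh).any())
print(steady)    # True: all three steady-state conditions hold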
Example #11
	def GetEnergyDistribution(self, maxEnergy, numEnergyPoints):
		"""Calculate double ionization energy distribution
		"""
		#maxE = 1.0
		E = linspace(0, maxEnergy, numEnergyPoints)
		dpde = zeros((len(E), len(E)), dtype=double)

		#to store double ionization prob before interpolation
		doubleIonProb = 0

		#for every l-pair (l1, l2) we have a set of (i1, i2) states
		#In order to create an approx to dp/de_1 de_2, we make a 2d 
		#interpolation for each l-shell
		#and add coherently to the dpde array
		P = self.ContinuumProjector
		for l1, l2, lPop in self.RadialProjections:
			#number of states in this l-shell (matching energy filter)
			n1 = P.SingleStatesLeft.GetNumberOfStates(l1, self.IsIonizedFilter)
			n2 = P.SingleStatesRight.GetNumberOfStates(l2, self.IsIonizedFilter)

			#sum up angular momentum components
			pop = (nabs(lPop)**2).sum(axis=0).reshape(n1, n2)

			#add contribution to total double ionization prob.
			doubleIonProb += sum(pop.flatten())

			#scale states with 1/dE_1 dE_2
			E1 = P.SingleStatesLeft.GetRadialEnergies(l1, self.IsIonizedFilter)
			E2 = P.SingleStatesRight.GetRadialEnergies(l2, self.IsIonizedFilter)
			pop[:-1,:-1] /= outer(diff(E1), diff(E2))
			
			#2d interpolation over all states in this shell
			interpolator = RectBivariateSpline(E1[:-1], E2[:-1], pop[:-1, :-1], 
				kx=1, ky=1, s=0.0)

			#evaluate on given energy points, and add to total dpde
			dpde += interpolator(E, E)

		#Calculate double ionization probability to check interpolation
		absErrIonProb = abs(doubleIonProb - sum(dpde.flatten()) * diff(E)[0]**2)
		relErrIonProb = absErrIonProb/doubleIonProb
		self.Logger.debug("Integrated double ionization probability: %s" 
			% doubleIonProb)
		if relErrIonProb > 0.01:
			warnMsg = "Integrating dP/dE1dE2 does not give correct double ionization probability"
			self.Logger.warning("%s: relerr = %s, abserr = %s." 
				% (warnMsg, relErrIonProb, absErrIonProb))
		else :
			self.Logger.debug("Difference in double ionization probability after interpolation: %s" % absErrIonProb)

		return E, dpde
Example #12
	def GetEnergyDistribution(self, maxEnergy, numEnergyPoints):
		"""Calculate single ionization energy distribution
		"""

		E = linspace(0, maxEnergy, numEnergyPoints)
		dpde = zeros(len(E), dtype=double)

		#to store single ionization prob before interpolation
		singleIonProb = 0

		#for every l-pair (lIon, lBound) we have a set of (iIon, iBound) states
		#In order to create an approx to dp/de, we make an interpolation for 
		#each l-shell and add incoherently to the dpde array
		P = self.ContinuumProjector
		for lIon, lBound, lPop in self.RadialProjections:
			#number of states in this l-shell (matching energy filter)
			nIon = P.SingleStatesLeft.GetNumberOfStates(lIon, 
				self.IsIonizedFilter)
			nBound = P.SingleStatesRight.GetNumberOfStates(lBound, 
				self.IsBoundFilter)

			#sum up angular momentum components
			pop = 2.0 * \
				(nabs(lPop)**2).sum(axis=0).reshape(nIon, nBound).transpose()

			#add contribution to total single ionization prob.
			singleIonProb += sum(pop.flatten())

			
			EIon = P.SingleStatesLeft.GetRadialEnergies(lIon, 
				self.IsIonizedFilter)

			#Iterate over all bound states for this l-combination
			for iBound in range(nBound):
				#scale states with 1/diff(EIon)
				curPop = pop[iBound, :-1] / diff(EIon)
				#interpolate over ionized populations, add to total dpde
				dpde += interp(E, EIon[:-1], curPop)
				
		#Calculate single ionization probability to check interpolation
		absErrIonProb = abs(singleIonProb - sum(dpde.flatten()) * diff(E)[0])
		relErrIonProb = absErrIonProb/singleIonProb
		self.Logger.debug("Integrated single ionization probability: %s" % singleIonProb)
		if relErrIonProb > 0.01:
			warnMsg = "Integrating dP/dE does not give correct single ionization probability"
			self.Logger.warning("%s: relerr = %s, abserr = %s." % (warnMsg, relErrIonProb, absErrIonProb))
		else :
			self.Logger.debug("Difference in single ionization probability after interpolation: %s" % absErrIonProb)	
	
		return E, dpde
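The scaling step is the core idiom of both energy distributions above: dividing discrete state populations by the local energy spacing turns them into a density, which is then resampled on a uniform grid. A standalone sketch:

import numpy as np

EIon = np.array([0.0, 0.1, 0.3, 0.6, 1.0])      # non-uniform state energies
pop = np.array([0.20, 0.30, 0.30, 0.15, 0.05])  # population per state
dpde = pop[:-1] / np.diff(EIon)                 # probability density dP/dE
E = np.linspace(0.0, 1.0, 11)                   # uniform energy grid
print(np.interp(E, EIon[:-1], dpde))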
Example #13
	def GetDoubleIonizationProbability(self):
		"""Calculate double ionization probability
		"""
		#check if double ionization is already calculated, if so, 
		#just use buffered value
		if not self.IonizationIsCalculated:
			I = sum([(nabs(pop)**2).sum() 
				for l1,l2,pop in self.RadialProjections])
			self.DoubleIonizationProbability = I
			self.SingleIonizationProbability = \
				self.TotalIonizationProbability - I
			self.IonizationIsCalculated = True
			
		return self.DoubleIonizationProbability
Example #14
def get_point_model_1_3(model=(1, 3), max_n=10, distribution=(0, 0, 0, 0, 0)):
    # sample parameters in logarithmic scale
    RStarSample = sample_value(
        0, 2, distributions[distribution[0]])  # rate at which new stars are born
    fPlanets = sample_value(
        -1, 0,
        distributions[distribution[1]])  # probability that a star has planets
    nEnvironment = sample_value(
        -1, 0,
        distributions[distribution[2]])  # probability that a planet is earth-like
    fIntelligence = sample_value(-3, 0, distributions[
        distribution[3]])  # prob. that intelligent beings come to exist
    fCivilization = sample_value(-2, 0, distributions[
        distribution[4]])  # prob. that these beings can communicate
    #       with other planets

    logN = log10(max_n)
    fLife = life_dist(mean=0, sigma=50)  # probability that life begins
    fLifeEks = log10(fLife)

    # N = RStarSample + fPlanets + nEnvironment + fLifeEks + fInteligence + fCivilization + L
    logL = logN - (RStarSample + fPlanets + nEnvironment + fLifeEks +
                   fIntelligence + fCivilization)
    if 3 not in model:  # if only model 1
        return [float(logL.real)]  # rate of birth of new civilisation

    N = 10**logN
    f = 10**(RStarSample + fPlanets + nEnvironment + fLifeEks + fIntelligence +
             fCivilization)
    # logL = log10(N) - log10(f)   ... model 1 would return logL like this
    A = 1
    B = 0.004 / (9.461e12**3)  # number density of stars as per Wikipedia
    a4 = 5.13342 * 1e10 * 10**(
        fPlanets + nEnvironment) * B  # estimated number of earth-like planets
    a14 = f * A  # rate of new intelligent civilisation born
    candidates = list(roots(
        [a4 * a14, 0, 0, a14,
         -N]))  # zeros of function: a4 * a14 * x^4 + a14 * x - N
    # actually we want to solve equation: f*A * (L + 5.13342*1e10*10**(fPlanets+nEnvironment)*B * L**4) = N
    L_initial_guess = N / (
        a14 * log10(a14)**4
    )  # a rough approximation, used only to pick the right root
    candidates.sort(key=lambda x: nabs(x - L_initial_guess))
    logL3 = log10(candidates[0])
    if 1 in model:
        return [float(logL.real), float(logL3.real)]
    return [float(logL3.real)]
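The root-selection idiom at the end, in isolation: numpy.roots returns all (generally complex) zeros, and sorting by distance to a rough initial guess picks the physically relevant one. A small sketch with an invented polynomial:

import numpy as np

candidates = list(np.roots([2.0, 0, 0, 3.0, -10.0]))  # 2x^4 + 3x - 10 = 0
L_initial_guess = 1.0
candidates.sort(key=lambda x: np.abs(x - L_initial_guess))
print(candidates[0].real)    # the real root near 1.32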
Example #15
def sample_value(fromv, tov, distribution="fixed"):
    # random value from "distribution" distribution from "10**fromv" to "10**tov"
    if distribution == "loguniform":
        return uniform(fromv, tov)  # loguniform from [10**fromv, 10**tov]
    elif distribution == "uniform":
        return log10(uniform(10**fromv, 10**tov))  # uniform from [fromv, tov]
    elif distribution == "halfgauss":
        sigma_half_gauss = (
            10**tov - 10**fromv
        ) / 3  # divided by 3 so that 3*sigma spans the whole interval
        return log10(nabs(normal(0, sigma_half_gauss)) + 10**fromv)  # gauss
    elif distribution == "lognormal":
        mean = (tov + fromv) / 2  # midpoint of the interval
        sigma = (
            tov - mean
        ) / 3  # divided by 3 so that 3*sigma spans the whole interval
        return normal(mean, sigma)  # lognormal
    return tov  # if distribution=="fixed"
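Hedged usage sketch, assuming the module-level aliases resolve (uniform and normal from numpy.random, log10 and nabs from numpy). Every branch returns roughly the log10 of a value between 10**fromv and 10**tov:

import numpy as np
from numpy.random import uniform, normal
log10, nabs = np.log10, np.abs

np.random.seed(0)
for dist in ("fixed", "loguniform", "uniform", "halfgauss", "lognormal"):
    print(dist, sample_value(-3, 0, dist))   # log10-scale samples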
Example #16
    def _on_select2(self, xmin, xmax):
        self._ax2.set_title(None)
        if self.datetime:
            t2 = date2num(self._t2)
        else:
            t2 = self._t2
        start, stop = searchsorted(t2, (xmin, xmax))

        self._b = zeros(self._a.shape)
        self._tb = t2[stop - self._a.size:stop]

        self._b[-(stop - start):] = self._x2[start:stop]

        A = fftpack.fft(self._a)
        B = fftpack.fft(self._b)

        Ar = -A.conjugate()

        self._ind_shift = argmax(nabs(fftpack.ifft(
            Ar * B)))  # cross-correlate the signals and find the peak

        self.t1_0 = self._ta[0]  # find the time value in the first signal
        self.t2_0 = self._tb[
            self._ind_shift]  # find the time value in the second signal

        t_pl = self._ta + (self.t2_0 - self.t1_0)
        x_pl = self._a

        if self.line is not None:
            self.line.set_data([], [])
        self.line, = self._ax2.plot(t_pl, x_pl, color='C2', label='Aligned')

        if self.datetime:
            self.t1_0 = to_datetime(num2date(self.t1_0)).tz_localize(None)
            self.t2_0 = to_datetime(num2date(self.t2_0)).tz_localize(None)

        # time difference between the signals
        self.t_diff = self.t2_0 - self.t1_0
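The core of the alignment is circular cross-correlation via the FFT: multiply the (negated) conjugate spectrum of one signal by the spectrum of the other, inverse-transform, and take the argmax as the sample shift. A self-contained sketch:

import numpy as np
from scipy import fftpack

rng = np.random.default_rng(1)
a = rng.normal(size=256)
b = np.roll(a, 40)                 # b lags a by 40 samples
Ar = -np.conj(fftpack.fft(a))      # same sign convention as above
ind_shift = np.argmax(np.abs(fftpack.ifft(Ar * fftpack.fft(b))))
print(ind_shift)                   # 40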
Example #17
    def __init__(self, x, dN):

        self.x, self.dN = nabs(x), dN
Example #18
zfft = fftshift(fftn(z))
plt.imshow(log(abs(zfft)))
datacursor(display='single')
# start with z = 0
seed(0)
gauss_random = np.random.rand(int(sz_x), int(sz_y))
gauss_random = gauss_random / np.max(np.abs(gauss_random))
z += gauss_random

#now blur with a Gaussian

zsmooth = convolve(z, gauss_filter, mode='valid')

if np.min(zsmooth) < 0:
    zsmooth += nabs(np.min(zsmooth))
zsmooth /= np.max(np.abs(zsmooth))
zsmooth *= max_phase
zsmooth -= (np.min(zsmooth) - min_phase)

plt.imshow(zsmooth)
colorbar()
# datacursor(display='single')

mask = zsmooth > 0.0  # boolean mask of positive phase values
plt.imshow(mask)
plt.imshow(zsmooth * mask)
Example #19
def hip_from_frames(pelvis_AF, thigh_AF, R, side, zero_angles=False):
    """
    Compute the hip joint angles from the segment fixed axes computed during the calibration.

    Parameters
    ----------
    pelvis_AF : tuple
        Tuple of the x, y, and z axes of the pelvis anatomical frame.
    thigh_AF : tuple
        Tuple of the x, y, and z axes of the thigh anatomical frame.
    R : numpy.ndarray
        Nx3x3 array of rotation matrices from the thigh sensor frame to the pelvis sensor frame.
    side : {'left', 'right'}
        Side the angles are being computed for.
    zero_angles : bool, optional
        Remove any offset from zero at the start of the angles. Default is False.

    Returns
    -------
    angles : numpy.ndarray
        Nx3 array of hip angles in degrees, with the first column being Flexion - Extension,
        second column being Ad / Abduction, and the third column being Internal - External Rotation.
    """
    # get pelvis anatomical frame
    X = pelvis_AF[0]
    Y = pelvis_AF[1]
    Z = pelvis_AF[2]

    # get the thigh anatomical frame and rotate into pelvis frame
    x = R @ thigh_AF[0]
    y = R @ thigh_AF[1]
    z = R @ thigh_AF[2]

    # form the joint axes
    e1 = Z.copy()
    e3 = y.copy()

    e2 = cross(e3, e1)
    e2 /= norm(e2, axis=1, keepdims=True)

    # compute angles
    sgn = sum(cross(X, e2) * Z, axis=1)
    sgn /= nabs(sgn)
    fe = atan2(sgn * norm(cross(X, e2), axis=1), sum(X * e2, axis=1))

    if side == 'right':
        # ad / abduction calculation
        aa = -pi / 2 + arccos(sum(e1 * e3, axis=1))

        # internal - external rotation sign calculation
        sgn = sum(cross(x, e2) * -y, axis=1)
    elif side == 'left':
        aa = pi / 2 - arccos(sum(e1 * e3, axis=1))
        sgn = sum(cross(x, e2) * y, axis=1)
    else:
        raise ValueError("side must be either 'left' or 'right'.")

    sgn /= nabs(sgn)
    ier = atan2(sgn * norm(cross(x, e2), axis=1), sum(x * e2, axis=1))

    angles = stack((fe, aa, ier), axis=1) * 180 / pi
    if zero_angles:
        angles -= angles[0, :]

    return angles
Example #20
    def observe(self, dict_in):
        """
        Loads observation model parameters into a dictionary, 
        performs the forward model and provides an initial solution.

        Args:
        dict_in (dict): Dictionary which will be overwritten with 
        all of the observation model parameters, forward model 
        observation 'y', and initial estimate 'x_0'.
        """
        warnings.simplefilter("ignore", np.ComplexWarning)
        #########################################
        #fetch observation model parameters here#
        #########################################

        if (self.str_type[:11] == 'convolution'
                or self.str_type == 'compressed_sensing'):
            wrf = self.get_val('wienerfactor', True)
            str_domain = self.get_val('domain', False)
            noise_pars = defaultdict(int)  #build a dict to generate the noise
            noise_pars['seed'] = self.get_val('seed', True)
            noise_pars['variance'] = self.get_val('noisevariance', True)
            noise_pars['distribution'] = self.get_val('noisedistribution',
                                                      False)
            noise_pars['mean'] = self.get_val('noisemean', True)
            noise_pars['interval'] = self.get_val('noiseinterval',
                                                  True)  #uniform
            noise_pars['size'] = dict_in['x'].shape
            dict_in['noisevariance'] = noise_pars['variance']

            if self.str_type == 'compressed_sensing':
                noise_pars['complex_noise'] = 1
            if dict_in['noisevariance'] > 0:
                dict_in['n'] = noise_gen(noise_pars)
            else:
                dict_in['n'] = 0

        elif self.str_type == 'classification':
            #partition the classification dataset into an 'observed' training set
            #and an unobserved evaluation/test set, and generate features
            dict_in['x_train'] = {}
            dict_in['x_test'] = {}
            dict_in['y_label'] = {}
            dict_in['x_feature'] = {}
            dict_in['n_training_samples'] = 0
            dict_in['n_testing_samples'] = 0
            shuffle = self.get_val('shuffle', True)
            if shuffle:
                shuffleseed = self.get_val('shuffleseed', True)
            training_proportion = self.get_val('trainingproportion', True)
            classes = dict_in['x'].keys()
            #partition and generate numeric class labels
            for _class_index, _class in enumerate(classes):
                class_size = len(dict_in['x'][_class])
                training_size = int(training_proportion * class_size)
                dict_in['n_training_samples'] += training_size
                dict_in['n_testing_samples'] += class_size - training_size
                if shuffle:
                    np.random.seed(shuffleseed)
                    indices = np.random.permutation(class_size)
                else:
                    indices = np.array(range(class_size), dtype='uint16')
                dict_in['x_train'][_class] = indices[:training_size]
                dict_in['x_test'][_class] = indices[training_size:]
                dict_in['y_label'][_class] = _class_index
        else:
            raise ValueError('unsupported observation model')
        ################################################
        #compute the forward model and initial estimate#
        ################################################
        if self.str_type == 'convolution':
            H = self.Phi

            H.set_output_fourier(False)
            dict_in['Hx'] = H * dict_in['x']
            dict_in['y'] = dict_in['Hx'] + dict_in['n']
            #regularized Wiener filtering in Fourier domain
            H.set_output_fourier(True)
            dict_in['x_0'] = real(
                ifftn(~H * dict_in['y'] /
                      (H.get_spectrum_sq() + wrf * noise_pars['variance'])))
            # dict_in['x_0'] = real(ifftn(~H * dict_in['y'])) %testing only
            H.set_output_fourier(False)
            #compute bsnr
            self.compute_bsnr(dict_in, noise_pars)
        elif self.str_type == 'convolution_downsample':
            Phi = self.Phi
            #this order is important in the config file
            D = Phi.ls_ops[1]
            H = Phi.ls_ops[0]
            H.set_output_fourier(False)
            if self.get_val('spatialblur', True):
                dict_in['Phix'] = D * convolve(dict_in['x'], H.kernel, 'same')
                dict_in['Hxpn'] = convolve(dict_in['x'], H.kernel,
                                           'same') + dict_in['n']
            else:
                dict_in['Phix'] = Phi * dict_in['x']
                dict_in['Hxpn'] = H * dict_in['x'] + dict_in['n']
            dict_in['Hx'] = dict_in['Phix']
            #the version of y without downsampling
            dict_in['DHxpn'] = np.zeros((D * dict_in['Hxpn']).shape)
            if dict_in['n'].__class__.__name__ == 'ndarray':
                dict_in['n'] = D * dict_in['n']
            dict_in['y'] = dict_in['Hx'] + dict_in['n']
            DH = fftn(Phi * nd_impulse(dict_in['x'].shape))
            DHt = conj(DH)
            Hty = fftn(D * (~Phi * dict_in['y']))
            HtDtDH = np.real(DHt * DH)
            # dict_in['x_0'] = ~D*real(ifftn(Hty /
            #                                (HtDtDH +
            #                                 wrf * noise_pars['variance'])))
            dict_in['x_0'] = ~D * dict_in['y']
            #optional interpolation
            xdim = dict_in['x'].ndim
            xshp = dict_in['x'].shape
            if self.get_val('interpinitialsolution', True):
                if xdim == 2:
                    if self.get_val('useimresize', True):
                        interp_vals = imresize(
                            dict_in['y'],
                            tuple(D.ds_factor *
                                  np.asarray(dict_in['y'].shape)),
                            interp='bicubic')
                    else:
                        grids = np.mgrid[[
                            slice(0, xshp[j]) for j in xrange(xdim)
                        ]]
                        grids = tuple(
                            [grids[i] for i in xrange(grids.shape[0])])
                        sampled_coords = np.mgrid[[
                            slice(D.offset[j], xshp[j], D.ds_factor[j])
                            for j in xrange(xdim)
                        ]]
                        values = dict_in['x_0'][[
                            coord.flatten() for coord in sampled_coords
                        ]]
                        points = np.vstack([
                            sampled_coords[i, Ellipsis].flatten()
                            for i in xrange(sampled_coords.shape[0])
                        ]).transpose()  #pts to interp
                        interp_vals = griddata(points,
                                               values,
                                               grids,
                                               method='cubic',
                                               fill_value=0.0)
                else:
                    values = dict_in[
                        'y']  #we're not using blank values, different interpolation scheme..
                    dsfactors = np.asarray(
                        [int(D.ds_factor[j]) for j in xrange(values.ndim)])
                    valshpcorrect = (
                        np.asarray(values.shape) -
                        np.asarray(xshp, dtype='uint16') / dsfactors)
                    valshpcorrect = valshpcorrect / np.asarray(dsfactors,
                                                               dtype='float32')
                    interp_coords = iprod(*[
                        np.arange(0, values.shape[j] - valshpcorrect[j], 1.0 /
                                  D.ds_factor[j]) for j in xrange(values.ndim)
                    ])
                    interp_coords = np.array([el for el in interp_coords
                                              ]).transpose()
                    interp_vals = map_coordinates(values,
                                                  interp_coords,
                                                  order=3,
                                                  mode='nearest').reshape(xshp)
                    # interp_vals = map_coordinates(values,interp_coords,order=3,mode='nearest')
                    # cut off the edges
                    # if xdim == 2:
                    # interp_vals = interp_vals[0:xshp[0],0:xshp[1]]
                    # else:
                    interp_vals = interp_vals[0:xshp[0], 0:xshp[1], 0:xshp[2]]
                dict_in['x_0'] = interp_vals
            elif self.get_val('inputinitialsoln', False) != '':
                init_soln_inputsec = Input(
                    self.ps_parameters, self.get_val('inputinitialsoln',
                                                     False))
                dict_in['x_0'] = init_soln_inputsec.read({}, True)
            self.compute_bsnr(dict_in, noise_pars)

        elif self.str_type == 'convolution_poisson':
            dict_in['mp'] = self.get_val('maximumphotonspervoxel', True)
            dict_in['b'] = self.get_val('background', True)
            H = self.Phi
            if str_domain == 'fourier':
                H.set_output_fourier(False)  #return spatial domain object
                orig_shape = dict_in['x'].shape
                Hspec = np.zeros(orig_shape)
                dict_in['r'] = H * dict_in['x']
                k = dict_in['mp'] / nmax(dict_in['r'])
                dict_in['r'] = k * dict_in['r']
                #scale the input image with the same photon-count
                #normalization applied to the output image
                dict_in['x'] = k * dict_in['x']
                dict_in['x'] = crop_center(
                    dict_in['x'], dict_in['r'].shape).astype('float32')
                #the spatial domain measurements, before photon counts
                dict_in['fb'] = dict_in['r'] + dict_in['b']
                #lambda of the poisson distn
                noise_pars['ary_mean'] = dict_in['fb']
                #specifying the poisson distn
                noise_distn2 = self.get_val('noisedistribution2', False)
                noise_pars['distribution'] = noise_distn2
                #generating quantized (uint16) poisson measurements
                # dict_in['y'] = (noise_gen(noise_pars)+dict_in['n']).astype('uint16').astype('int32')
                dict_in['y'] = noise_gen(noise_pars) + crop_center(
                    dict_in['n'], dict_in['fb'].shape)
                dict_in['y'][dict_in['y'] < 0] = 0
            elif str_domain == 'evaluation':  #are given the observation, which is stored in 'x'
                dict_in['y'] = dict_in.pop('x')
            else:
                raise Exception('domain not supported: ' + str_domain)
            dict_in['x_0'] = ((~H) * (dict_in['y'])).astype(dtype='float32')
            dict_in['y_padded'] = pad_center(dict_in['y'],
                                             dict_in['x_0'].shape)

        elif self.str_type == 'compressed_sensing':
            Fu = self.Phi
            dict_in['Hx'] = Fu * dict_in['x']
            dict_in['y'] = dict_in['Hx'] + dict_in['n']
            dict_in['x_0'] = (~Fu) * dict_in['y']
            dict_in['theta_0'] = angle(dict_in['x_0'])
            dict_in['theta_0'] = su.phase_unwrap(dict_in['theta_0'],
                                                 dict_in['dict_global_lims'],
                                                 dict_in['ls_local_lim_secs'])
            dict_in['magnitude_0'] = nabs(dict_in['x_0'])
            if self.get_val('maskinitialsoln', True):
                dict_in['theta_0'] *= dict_in['mask']
                dict_in['magnitude_0'] *= dict_in['mask']
            dict_in['x_0'] = dict_in['magnitude_0'] * exp(
                1j * dict_in['theta_0'])
            self.compute_bsnr(dict_in, noise_pars)
        #store the wavelet domain version of the ground truth
        if np.iscomplexobj(dict_in['x']):
            dict_in['w'] = [
                self.W * dict_in['x'].real, self.W * dict_in['x'].imag
            ]
        else:
            dict_in['w'] = [self.W * dict_in['x']]
Example #21
    def __mean_absolute_error(self, fact, pred):

        deflection_mod = nsum(nabs(subtract(fact, pred)))
        count = pred.shape[0]
        return divide(deflection_mod, count)
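For reference, the same mean absolute error as a one-liner on plain arrays:

import numpy as np

fact = np.array([1.0, 2.0, 3.0])
pred = np.array([1.5, 1.5, 2.0])
print(np.mean(np.abs(fact - pred)))   # 0.666...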
Example #22
def hip(pelvis_frames, thigh_frames, side):
    """
    Compute hip angles.

    Parameters
    ----------
    pelvis_frames : numpy.ndarray
        Nx3x3 array of anatomical frames of the pelvis in a world frame. N time samples of 3x3 matrices, of which the
        first column is the pelvis X-axis, second column is the pelvis Y-axis, and third column is the pelvis Z-axis
    thigh_frames: numpy.ndarray
        Nx3x3 array of anatomical frames of the thigh for N time points in the world frame. Each 3x3 matrix is comprised
        of columns of thigh x-axis, y-axis, and z-axis, in that order.
    side : {'left', 'right'}
        Side the angles are being computed for. Used for sign determination of angles.

    Returns
    -------
    hip_angles : numpy.ndarray
        Nx3 array of hip angles, with the first column being flexion-extension, second column being ad/abduction,
        and third column being internal-external rotation.

    References
    ----------
    Wu et al. "ISB recommendations on definitions of joint coordinate systems of various joints for the reporting of
    human joint motion - part I: ankle, hip, and spine." J. of Biomech. Vol. 35. 2002.
    Dabirrahmani et al. "Modification of the Grood and Suntay Joint Coordinate System equations for knee joint flexion."
    Med. Eng. and Phys. Vol. 39. 2017.
    Grood et al. "A joint coordinate system for the clinical description of three-dimensional motions: application to
    the knee." J. of Biomech. Engr. Vol. 105. 1983.
    """
    # extract the proximal (pelvis) segment axes
    X = pelvis_frames[:, :, 0]
    Z = pelvis_frames[:, :, 2]

    # extract the distal (thigh) segment axes
    x = thigh_frames[:, :, 0]
    y = thigh_frames[:, :, 1]

    # create the hip joint axes
    e1 = Z.copy()
    e3 = y.copy()

    e2 = cross(e3, e1)
    e2 /= norm(e2, axis=1, keepdims=True)

    # compute the angles by finding the angle between specific axes
    sgn = nsum(cross(X, e2) * Z, axis=1)
    sgn /= nabs(sgn)
    fe = atan2(sgn * norm(cross(X, e2), axis=1), nsum(X * e2, axis=1))

    if side.lower() == 'right':
        sgn = nsum(cross(x, e2) * -y, axis=1)
    elif side.lower() == 'left':
        sgn = nsum(cross(x, e2) * y, axis=1)
    else:
        raise ValueError('Side must be "left" or "right".')

    sgn /= nabs(sgn)
    ier = atan2(sgn * norm(cross(x, e2), axis=1), nsum(x * e2, axis=1))

    if side.lower() == 'right':
        aa = -pi / 2 + arccos(nsum(e1 * e3, axis=1))
    elif side.lower() == 'left':
        aa = pi / 2 - arccos(nsum(e1 * e3, axis=1))

    return stack((fe, aa, ier), axis=1) * 180 / pi
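The signed-angle idiom used twice in both hip routines, in isolation: atan2(sign * |X x e2|, X . e2) recovers an angle in (-pi, pi], with the sign read off a reference axis Z. A one-sample sketch:

import numpy as np

X = np.array([[1.0, 0.0, 0.0]])                   # proximal segment axis
Z = np.array([[0.0, 0.0, 1.0]])                   # reference axis for the sign
e2 = np.array([[np.cos(0.3), np.sin(0.3), 0.0]])  # floating joint axis

sgn = np.sum(np.cross(X, e2) * Z, axis=1)
sgn /= np.abs(sgn)
fe = np.arctan2(sgn * np.linalg.norm(np.cross(X, e2), axis=1),
                np.sum(X * e2, axis=1))
print(fe)    # [0.3]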
Example #23
def TestMV(itseries, ind, lobs):
    """
    Performs mean and variance change statistical tests. The variance change
    test is based on an F-test (F-distribution) and the mean change test on a
    t-test (Student's distribution).

    Parameters
    ----------
    itseries: 1D array
        one dimensional array of time series

    ind: integer
        index in the time series around which the ARIMA(1,0,0) process is
        performed

    lobs: integer
        maximal number of elements on the left and right sides of the ind
        index over which ARIMA(1,0,0) is performed

    Returns
    -------
    cpvalue: numeric
        0.1, 0.05 or 0; 0.1 if both the mean and the variance change
        significantly around ind, 0.05 if only the mean or the variance
        changes significantly, 0 in case neither of them changes
    """
    cpvalue = 0
    tstart = max(ind - lobs + 1, 0)  # start of test interval around ind index
    tend = min(ind + lobs + 1,
               len(itseries))  # end of test interval around ind index
    if (len(itseries[tstart:(ind + 1)]) > 1) and (len(itseries[(ind + 1):tend])
                                                  > 1):
        # both the left and right sides around the ind index must contain
        # more than one element
        if (var(itseries[tstart:(ind + 1)], ddof=1)
                == 0) and (var(itseries[(ind + 1):tend], ddof=1) != 0):
            cpvalue = 0.1
        # if the variance is zero on the left side and non-zero on the right
        # side, then the variance definitely changes around the ind index
        if (var(itseries[tstart:(ind + 1)], ddof=1) != 0) and (var(
                itseries[(ind + 1):tend], ddof=1) == 0):
            cpvalue = 0.1
        if (var(itseries[tstart:(ind + 1)], ddof=1) *
                var(itseries[(ind + 1):tend], ddof=1)) > 0:
            intseries = array(itseries[tstart:tend])  # slicing test data
            n = len(intseries)
            mid_ind = (n / 2) - 1  # ind element position in sliced data
            all_means = emean(intseries, min_periods=1)
            all_vars = evar(intseries, min_periods=1)
            rev_all_means = emean(intseries[::-1], min_periods=1)
            rev_all_vars = evar(intseries[::-1], min_periods=1)
            test_lens = arange((mid_ind + 1), (n + 1))
            if (rev_all_vars[mid_ind] > 0) and (all_vars[mid_ind] > 0):
                z = all_vars[mid_ind] / rev_all_vars[mid_ind]
                rz = 1 / z
            else:
                z = inf
                rz = 0.0
            ## variance change F-test with reliability value 99.8% (0.1%-99.9%)
            if (z > f.ppf(1 - 0.001, mid_ind, mid_ind)) or (z < f.ppf(
                    0.001, mid_ind, mid_ind)):
                cpvalue = 0.05
            if (rz > f.ppf(1 - 0.001, mid_ind, mid_ind)) or (rz < f.ppf(
                    0.001, mid_ind, mid_ind)):
                cpvalue = 0.05
            ## calculation of t-test statistics
            Sx_y = sqrt(
                ((mid_ind * all_vars[mid_ind] + test_lens * all_vars[mid_ind:])
                 * (mid_ind + test_lens)) /
                ((mid_ind + test_lens - 2) * mid_ind * test_lens))
            t_jn = nabs((all_means[mid_ind] - all_means[mid_ind:]) / Sx_y)

            rSx_y = sqrt(((mid_ind * rev_all_vars[mid_ind] +
                           test_lens * rev_all_vars[mid_ind:]) *
                          (mid_ind + test_lens)) /
                         ((mid_ind + test_lens - 2) * mid_ind * test_lens))
            rt_jn = nabs(
                (rev_all_means[mid_ind] - rev_all_means[mid_ind:]) / rSx_y)

            t_stat = nmax((t_jn, rt_jn))
            dfree = n - 2
            # mean change t-test with reliability value 99.8% (0.1%-99.9%)
            if t_stat > t.ppf(1 - 0.001, dfree):
                cpvalue = cpvalue + 0.05
        if cpvalue > 0:
            # if a change was flagged, check whether the detected changepoint
            # is significant by calculating the sindic value for the interval
            sindic = abs(
                std(itseries[tstart:(ind - 1)], ddof=1) -
                std(itseries[(ind + 1):tend], ddof=1))
            sindic = sindic * mean(itseries[tstart:tend])
            if sindic is not None:
                if sindic <= 0.03:
                    cpvalue = 0  # if sindic is less than 0.03 then changepoint is not significant
    return cpvalue
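The variance-change core of TestMV in isolation: the ratio of the two window variances is compared against the 0.1% and 99.9% quantiles of the F-distribution. A standalone sketch on synthetic data with an obvious variance change:

import numpy as np
from scipy.stats import f

rng = np.random.default_rng(2)
left = rng.normal(0.0, 1.0, 50)    # variance 1 before the changepoint
right = rng.normal(0.0, 4.0, 50)   # variance 16 after it
z = np.var(left, ddof=1) / np.var(right, ddof=1)
d = len(left) - 1
print(z < f.ppf(0.001, d, d) or z > f.ppf(1 - 0.001, d, d))   # True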
Example #24
FD = 100
N = 50
filter_line = 3

t = arange(N) / FD
signal = 4 * cos(2 * pi * 37 * t) + 6 * cos(2 * pi * 179 * t) + 13 * cos(
    2 * pi * 74 * t)
noise = (-6) * sample(
    signal.shape[0]
) + 3  # noise amplitude too large: (-np.pi, np.pi); good noise: (-0.1, 0.1)

noised_signal = signal + noise

freq = rfftfreq(N, 1. / FD)
clear_ampl = 2 * nabs(rfft(signal)) / N
ampl = 2 * nabs(rfft(noised_signal)) / N
spectrum = rfft(noised_signal)

fig = plt.figure(figsize=(6, 6))
sub = fig.add_subplot(111)  # 111 = one row, one column, first plot

plt.plot(freq, ampl, label='Frequencies', c='green')
plt.plot(freq, [mean(ampl) for i in range(freq.shape[0])], c='orange')
plt.plot(freq, [filter_line * mean(ampl) for i in range(freq.shape[0])],
         c='red')
plt.plot(freq, clear_ampl, c='blue')

sub.set_xlabel('Frequency (Hz)', fontsize=12)

plt.legend(fontsize=12)
Example #25
	def GetAngularDistributionCoplanar(self, energyGrid, thetaGrid, phi1, phi2,\
		customLFilter=lambda l1,l2: True):
		"""Calculate co-planar angular distribution.
		
		"""
		
		lmax = Getlmax(self.Config)
		angularRank = GetAngularRankIndex(self.Psi)

		#Get spherical harmonics at phi1 and phi2
		assocLegendre1 = array(GetSphericalHarmonics(lmax, thetaGrid, phi1), 
			dtype=complex)
		assocLegendre2 = array(GetSphericalHarmonics(lmax, thetaGrid, phi2), 
			dtype=complex)
		
		#calculate angular distr for double ionized psi, evaluated at phi1=phi2=phi
		#f = angular_distributions.GetDoubleAngularDistributionCoplanar
		#f(self.Psi, self.Z, energyGrid, self.RadialProjections, assocLegendre1, assocLegendre2, thetaGrid)
		
		#Make a copy of the wavefunction and multiply 
		#integration weights and overlap matrix
		tempPsi = self.Psi.Copy()
		repr = self.Psi.GetRepresentation()
		repr.MultiplyIntegrationWeights(tempPsi)
		angRepr = repr.GetRepresentation(angularRank)
	
		cg = pyprop.core.ClebschGordan()
	
		interpCount = len(energyGrid)
	
		thetaCount = len(thetaGrid)
		angularDistr = zeros((thetaCount, thetaCount, interpCount, interpCount),
			dtype=double)
	
		pop = 0
		angularDistrProj = zeros(angularDistr.shape, dtype=complex)
		P = self.ContinuumProjector
		doubleIonProb = 0
		for l1, l2, lPop in self.RadialProjections:

			#Check if we should skip this combination of l1,l2
			if not customLFilter(l1,l2):
				self.Logger.debug("Skipping (l1,l2) = (%i,%i)" % (l1,l2))
				continue

			#number of states in this l-shell (matching energy filter)
			n1 = P.SingleStatesLeft.GetNumberOfStates(l1, self.IsIonizedFilter)
			n2 = P.SingleStatesRight.GetNumberOfStates(l2, self.IsIonizedFilter)
	
			#sum up angular momentum components
			pop = (nabs(lPop)**2).sum(axis=0).reshape(n1, n2)
	
			#add contribution to total double ionization prob.
			doubleIonProb += sum(pop.flatten())
	
			#scale states with 1/dE_1 dE_2
			E1 = array(P.SingleStatesLeft.GetRadialEnergies(l1, 
				self.IsIonizedFilter))
			E2 = array(P.SingleStatesRight.GetRadialEnergies(l2, 
				self.IsIonizedFilter))
			
			#filter out coupled spherical harmonic indices. this gives us a 
			#set of L's for the given l1, l2, M
			lfilter = lambda coupledIndex: coupledIndex.l1 == l1 \
				and coupledIndex.l2 == l2  
			angularIndices = \
				array(GetLocalCoupledSphericalHarmonicIndices(self.Psi,
					lfilter), dtype=int32)
			getIdx = lambda c: angRepr.Range.GetCoupledIndex(int(c))
			coupledIndices = map(getIdx, angularIndices)
	
			if len(angularIndices) == 0:
				continue
		
			#scale states with 1/dE_1 dE_2
			def GetDensity(curE):
				interiorSpacing = list(diff(curE)[1:])
				leftSpacing = (curE[1] - curE[0])
				rightSpacing = (curE[-1] - curE[-2])
				spacing = array([leftSpacing] + interiorSpacing + 
					[rightSpacing])
				return 1.0 / sqrt(spacing)
			stateDensity = outer(GetDensity(E1), GetDensity(E2))
	
			#coulomb phases (-i)**(l1 + l2) * exp( i*(sigma_l1 + sigma_l2) )
			phase1 = exp(1.0j * array([GetCoulombPhase(l1, -self.Z/curK) 
				for curK in sqrt(2*E1)]))
			phase2 = exp(1.0j * array([GetCoulombPhase(l2, -self.Z/curK) 
				for curK in sqrt(2*E2)]))
			phase = (-1.j)**(l1 + l2) * outer(phase1, phase2)
	
			#interpolate projection on equidistant energies and sum over L and M
			#interpProj = zeros((interpCount, interpCount), dtype=complex)
			for j in range(lPop.shape[0]):
				curRadialProj = phase * stateDensity * lPop[j,:,:]
	
				#interpolate in polar complex coordinates
				def dointerp():
					r = abs(curRadialProj)**2
					i = arctan2(imag(curRadialProj), real(curRadialProj))
					argr = cos(i)
					argi = sin(i)
					interpr = scipy.interpolate.RectBivariateSpline(E1, E2, r, 
						kx=1, ky=1)(energyGrid, energyGrid)
					interpArgR = scipy.interpolate.RectBivariateSpline(E1, E2, 
						argr, kx=1, ky=1)(energyGrid, energyGrid)
					interpArgI = scipy.interpolate.RectBivariateSpline(E1, E2, 
						argi, kx=1, ky=1)(energyGrid, energyGrid)
					interpPhase = (interpArgR + 1.j*interpArgI) \
						/ sqrt(interpArgR**2 + interpArgI**2)
					curInterpProj = sqrt(maximum(interpr, 0)) * interpPhase
					return curInterpProj
				curInterpProj = dointerp()
	
				#Sum over m:
				def doSum():
					AddDoubleAngularProjectionCoplanar(angularDistrProj, 
						assocLegendre1, assocLegendre2, curInterpProj, 
						coupledIndices[j])
				doSum()
	
		#calculate projection for this m-shell
		angularDistr = real(angularDistrProj * conj(angularDistrProj))
	
		return angularDistr
Example #26
    def compute(self, prox_a, dist_a, prox_w, dist_w, prox_wd, dist_wd,
                R_dist_prox):
        """
        Perform the computation of the joint center to sensor vectors.

        Parameters
        ----------
        prox_a : numpy.ndarray
            Nx3 array of accelerations measured by the joint proximal sensor.
        dist_a : numpy.ndarray
            Nx3 array of accelerations measured by the joint distal sensor.
        prox_w : numpy.ndarray
            Nx3 array of angular velocities measured by the joint proximal sensor.
        dist_w : numpy.ndarray
            Nx3 array of angular velocities measured by the joint distal sensor.
        prox_wd : numpy.ndarray
            Nx3 array of angular accelerations measured by the joint proximal sensor.
        dist_wd : numpy.ndarray
            Nx3 array of angular accelerations measured by the joint distal sensor.
        R_dist_prox : numpy.ndarray
            Nx3x3 array of rotations from the distal sensor frame to the proximal sensor frame. Ignored if method is
            'SSFC'.

        Returns
        -------
        prox_r : numpy.ndarray
            Joint center to proximal sensor vector.
        dist_r : numpy.ndarray
            Joint center to distal sensor vector.
        residual : float
            Residual value per sample from the joint center optimization.
        """
        if self.method == 'SAC':
            if self.mask_input:
                if self.mask_data == 'acc':
                    prox_data = norm(prox_a, axis=1) - self.g
                    dist_data = norm(dist_a, axis=1) - self.g
                    thresh = 1.0
                elif self.mask_data == 'gyr':
                    prox_data = norm(prox_w, axis=1)
                    dist_data = norm(dist_w, axis=1)
                    thresh = 2.0

                mask = zeros(prox_data.shape, dtype=bool)

                while mask.sum() < self.min_samples:
                    mask = logical_and(
                        nabs(prox_data) > thresh,
                        nabs(dist_data) > thresh)

                    thresh -= 0.05
                    if thresh < 0.09:
                        raise ValueError(
                            'Not enough samples or samples with high motion in the trial provided.  '
                            'Use another trial')
            else:
                mask = zeros(prox_a.shape[0], dtype=bool)
                mask[:] = True

            # create the skew symmetric matrix products
            prox_K = array(
                [[
                    -prox_w[mask, 1]**2 - prox_w[mask, 2]**2,
                    prox_w[mask, 0] * prox_w[mask, 1] - prox_wd[mask, 2],
                    prox_wd[mask, 1] + prox_w[mask, 0] * prox_w[mask, 2]
                ],
                 [
                     prox_wd[mask, 2] + prox_w[mask, 0] * prox_w[mask, 1],
                     -prox_w[mask, 0]**2 - prox_w[mask, 2]**2,
                     prox_w[mask, 1] * prox_w[mask, 2] - prox_wd[mask, 0]
                 ],
                 [
                     prox_w[mask, 0] * prox_w[mask, 2] - prox_wd[mask, 1],
                     prox_wd[mask, 0] + prox_w[mask, 1] * prox_w[mask, 2],
                     -prox_w[mask, 0]**2 - prox_w[mask, 1]**2
                 ]]).transpose([2, 0, 1])

            dist_K = array(
                [[
                    -dist_w[mask, 1]**2 - dist_w[mask, 2]**2,
                    dist_w[mask, 0] * dist_w[mask, 1] - dist_wd[mask, 2],
                    dist_wd[mask, 1] + dist_w[mask, 0] * dist_w[mask, 2]
                ],
                 [
                     dist_wd[mask, 2] + dist_w[mask, 0] * dist_w[mask, 1],
                     -dist_w[mask, 0]**2 - dist_w[mask, 2]**2,
                     dist_w[mask, 1] * dist_w[mask, 2] - dist_wd[mask, 0]
                 ],
                 [
                     dist_w[mask, 0] * dist_w[mask, 2] - dist_wd[mask, 1],
                     dist_wd[mask, 0] + dist_w[mask, 1] * dist_w[mask, 2],
                     -dist_w[mask, 0]**2 - dist_w[mask, 1]**2
                 ]]).transpose([2, 0, 1])

            # create the oversized A and b matrices
            A = concatenate((prox_K, -R_dist_prox[mask] @ dist_K),
                            axis=2).reshape((-1, 6))
            b = (prox_a[mask].reshape(
                (-1, 3, 1)) - R_dist_prox[mask] @ dist_a[mask].reshape(
                    (-1, 3, 1))).reshape((-1, 1))

            # solve the linear least squares problem
            r, residual, _, _ = lstsq(A, b, rcond=None)
            r.resize((6, ))
            residual = residual[0]

        elif self.method == 'SSFC':
            r_init = zeros((6, ))

            if self.mask_input:
                if self.mask_data == 'acc':
                    prox_data = norm(prox_a, axis=1) - self.g
                    dist_data = norm(dist_a, axis=1) - self.g
                    thresh = 1.0
                elif self.mask_data == 'gyr':
                    prox_data = norm(prox_w, axis=1)
                    dist_data = norm(dist_w, axis=1)
                    thresh = 2.0

                mask = zeros(prox_data.shape, dtype=bool)

                while mask.sum() < self.min_samples:
                    mask = logical_and(
                        nabs(prox_data) > thresh,
                        nabs(dist_data) > thresh)

                    thresh -= 0.05
                    if thresh < 0.09:
                        raise ValueError(
                            'Not enough samples or samples with high motion in the trial provided.  '
                            'Use another trial')
            else:
                mask = zeros(prox_a.shape[0], dtype=bool)
                mask[:] = True

            # create the arguments to be passed to both the residual and jacobian calculation functions
            args = (prox_a[mask], dist_a[mask], prox_w[mask], dist_w[mask],
                    prox_wd[mask], dist_wd[mask])

            sol = least_squares(Center._compute_distance_residuals,
                                r_init.flatten(),
                                args=args,
                                **self.opt_kwargs)
            r = sol.x
            residual = sol.cost

        return r[:3], r[3:], residual / mask.sum()
Example #27
    def solve(self, dict_in):
        super(MSIST, self).solve()

        ##################################
        ### Transforms and Modalities ####
        ##################################
        H = self.H  #mapping from solution domain to observation domain
        dict_in['H'] = H
        W = self.W  #sparsifying transform
        dict_in['W'] = W
        # precision = 'float32'
        # if W.output_dtype!='':
        #     precision = W.output_dtype

        if self.alpha.__class__.__name__ != 'ndarray':
            self.alpha = su.spectral_radius(
                self.W, self.H, dict_in['x_0'].shape,
                self.get_val('alphamethod', False, 'spectrum'))
            # self.alpha = su.spectral_radius(self.W, self.H, (64,64,64),
            #                                 self.get_val('alphamethod', False, 'spectrum'))
        alpha = self.alpha  #Lambda_alpha main diagonal (B-sized vector of subband gains)
        dict_in['alpha'] = alpha
        ############
        #Input Data#
        ############

        if H.output_fourier:
            y_hat = dict_in['y']
        else:
            #do an extra FFT to do deconvolution in fourier domain
            y_hat = fftn(dict_in['y'])

        x_n = dict_in['x_0'].copy()  #seed current solution
        # The famous Joan Lasenby "residuals"
        dict_in['x_n'] = x_n
        x = dict_in['x'].copy()
        dict_in['resid_n'] = x - x_n
        x_max = np.max(x)
        x_min = np.min(x)
        # dict_in['resid_range'] = np.array([x_min - x_max, x_max + x_max])
        dict_in['resid_range'] = np.array([-255.0 / 2, 255.0 / 2])
        #######################
        #Common Initialization#
        #######################

        #determine whether/not we need double the wavelet transforms on
        #each iteration for a complex-valued input signal
        self.input_complex = np.iscomplexobj(x_n)

        #initialize current solution in sparse domain
        #g_i is the element group size (2 for CWT, 4 for CWT and input_complex)
        if self.input_complex:
            if self.input_phase_encoded:
                theta_n = su.phase_unwrap(angle(x_n),
                                          dict_in['dict_global_lims'],
                                          dict_in['ls_local_lim_secs'])
            else:
                theta_n = angle(x_n)
            dict_in['theta_n'] = theta_n
            dict_in['magnitude_n'] = nabs(x_n)

            w_n = [W * x_n.real, W * x_n.imag]
            g_i = 2 * (w_n[0].is_wavelet_complex() + 1)
        else:
            w_n = [W * x_n]
            g_i = (w_n[0].is_wavelet_complex() + 1)
        w_n_len = len(w_n)
        w_n_it = xrange(w_n_len)  #iterator for w_n
        dict_in['w_n'] = w_n

        #initialize the precision matrix with zeros
        S_n = w_n[0] * 0
        dict_in['S_n'] = S_n
        #initialize continuation parameters
        epsilon, nu = self.get_epsilon_nu()
        if self.ordepsilon:
            # self.ordepsilonpercstart = 8.0/9.0*(.55**2)
            epsilon = np.zeros(self.int_iterations + 1, )
            self.percentiles = np.arange(30, self.ordepsilonpercstop,
                                         -1.0 / self.int_iterations)
            epsilon[0] = self.get_ord_epsilon(w_n[0], np.inf,
                                              self.percentiles[0])
            dict_in['epsilon_sq'] = epsilon[0]**2
        else:
            dict_in['epsilon_sq'] = epsilon**2
        if self.convexnu:
            nu = np.zeros(self.int_iterations + 1, )
            nu[0] = self.get_convex_nu(w_n[0], epsilon[0]**2, np.min(alpha))
            dict_in['nu_sq'] = nu[0]**2
        else:
            dict_in['nu_sq'] = nu**2

        #wavelet domain variance used for poisson deblurring
        ary_p_var = 0

        ########################################
        #Sparse penalty-specific initialization#
        ########################################

        if self.str_sparse_pen == 'l0rl2_bivar':
            w_tilde = w_n[0] * 0
            sqrt3 = sqrt(3.0)
            sigsq_n = self.get_val('nustop', True)**2
            sig_n = sqrt(sigsq_n)

        if self.str_sparse_pen == 'l0rl2_group':
            tau = self.get_val('tau', True)
            tau_rate = self.get_val('taurate', True)
            tau_start = self.get_val('taustart', True)
            if np.all(tau_start != 0) and tau_rate != 0:
                tau_end = tau
                tau = tau_start
            A = sf.create_section(self.ps_parameters,
                                  self.get_val('clusteraverage',
                                               False))  #cluster
            G = sf.create_section(self.ps_parameters,
                                  self.get_val('groupaverage', False))  #group
            #initialize A and G with parameters of the master vector
            A.init_csr_avg(w_n[0])
            G.init_csr_avg(w_n[0])
            dup_it = xrange(
                A.duplicates)  # iterator for duplicate variable space

            #initialize non-overlapping space (list of ws objects ls_w_hat_n)
            ls_w_hat_n = [[w_n[ix_] * 1 for j in dup_it] for ix_ in w_n_it]

            #initialize non-overlapping space precision
            ls_S_hat_n = [
                ((sum([w_n[ix].energy()
                       for ix in w_n_it]) / g_i) + epsilon[0]**2).invert()
                for int_dup in dup_it
            ]
            w_bar_n = [w_n[ix_] * 1 for ix_ in w_n_it]

            #using the structure of A, initialize the support of S_hat and w_hat
            A_row_ix = np.nonzero(A.csr_avg)[0]
            A_col_ix = np.nonzero(A.csr_avg)[1]
            D = csr_matrix((np.ones(A_col_ix.size, ), (A_row_ix, A_col_ix)),
                           shape=A.csr_avg.shape)

            #compute the support of Shat
            ls_S_hat_sup = unflat_list(
                D.transpose() * ((w_n[0] * 0 + 1).flatten()), A.duplicates)

            #load this vector into each new wavelet subband object
            ls_S_hat_sup = [(w_n[0] * 0).unflatten(S_sup)
                            for S_sup in ls_S_hat_sup]
            ls_S_hat_sup = [
                S_hat_n_sup.nonzero() for S_hat_n_sup in ls_S_hat_sup
            ]
            del S_sup
            del S_hat_n_sup
            #precompute AtA (doesn't change from one iteration to the next)
            AtA = (A.csr_avg.transpose() * A.csr_avg).tocsr()

            #convert tau**2 to csr format to allow for subband-adaptive constraint
            if tau.__class__.__name__ != 'ndarray':
                tau_sq = np.ones(w_n[0].int_subbands) * tau**2
            else:
                tau_sq = tau**2
            tau_sq_dia = [((w_n[0] * 0 + 1).cast(A.dtype)) * tau_sq
                          for j in dup_it]
            # tau_sq_dia = [((w_n[0]*0+1))*tau_sq for j in dup_it]
            tau_sq_dia = su.flatten_list(tau_sq_dia)
            offsets = np.array([0])
            tau_sz = tau_sq_dia.size
            tau_sq_dia = dia_matrix((tau_sq_dia, offsets),
                                    shape=(tau_sz, tau_sz))

            #initialize S_hat_bar parameters for efficient matrix inverses
            Shatbar_p_filename = A.file_path.split('.pkl')[0] + 'Shatbar.pkl'
            if not os.path.isfile(Shatbar_p_filename):
                dict_in['col_offset'] = A.int_size
                S_hat_n_csr = su.flatten_list_to_csr(ls_S_hat_sup)
                su.inv_block_diag((tau_sq_dia) * AtA + S_hat_n_csr, dict_in)
                filehandler = open(Shatbar_p_filename, 'wb')
                cPickle.dump(dict_in['dict_bdiag'], filehandler, -1)
                del S_hat_n_csr
            else:
                filehandler = open(Shatbar_p_filename, 'rb')
                dict_in['dict_bdiag'] = cPickle.load(filehandler)
            filehandler.close()
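            #the block-diagonal inverse above is expensive to compute, so it
            #is pickled to disk once and reloaded on subsequent runs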

            #store all of the l0rl2_group specific variables in the solver dict_in
            dict_in['ls_S_hat_n'] = ls_S_hat_n
            dict_in['ls_w_hat_n'] = ls_w_hat_n
            dict_in['w_bar_n'] = w_bar_n
            dict_in['G'] = G
            dict_in['A'] = A
            dict_in['W'] = W
            dict_in['AtA'] = AtA
            dict_in['ls_S_hat_sup'] = ls_S_hat_sup
            dict_in['w_n_it'] = w_n_it
            dict_in['dup_it'] = dup_it
            dict_in['ws_dummy'] = w_n[0] * 0
            dict_in['g_i'] = g_i

            # self.update_duplicates(dict_in,nu[0],epsilon[0],tau_sq, tau_sq_dia)

            w_bar_n = dict_in['w_bar_n']
            ls_w_hat_n = dict_in['ls_w_hat_n']
            ls_S_hat_n = dict_in['ls_S_hat_n']
            del D  #iterations need A and G only, not D

        if (self.str_sparse_pen == 'vbmm' or  #vbmm
                self.str_sparse_pen == 'vbmm_hmt'):
            p_a = self.get_val('p_a', True)
            p_b_0 = self.get_val('p_b_0', True)
            p_k = self.get_val('p_k', True)
            p_theta = self.get_val('p_theta', True)
            p_c = self.get_val('p_c', True)
            p_d = self.get_val('p_d', True)
            b_n = w_n[0] * 0
            sigma_n = 0
            if self.str_sparse_pen == 'vbmm_hmt':
                ary_a = self.get_gamma_shapes(W * dict_in['x_0'])
                b_n = w_n[0] * p_b_0

        #Poisson + Gaussian noise,
        #using the scaling coefficients in the regularization (MSIST-P)
        if self.input_poisson_corrupted:
            #need a 0-padded y to get the right size for the scaling coefficients
            b = dict_in['b']
            if not H.output_fourier:
                y_hat = fftn(dict_in['y'] - b)
            else:
                y_hat = fftn(ifftn(dict_in['y']) - b)
            w_y = (W * dict_in['y_padded'])
            dict_in['x_n'] = su.crop_center(x_n, dict_in['y'].shape)
            w_y_scaling_coeffs = w_y.downsample_scaling()
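            #the scaling (lowpass) coefficients of y provide a local variance
            #estimate for the Poisson component in the subband updates below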

        self.results.update(dict_in)
        print 'Finished itn: n=' + str(0)
        #begin iterations here for the MSIST(-X) algorithm, add some profiling info here
        if self.profile:
            dict_profile = {}
            dict_profile['twoft_time'] = []
            dict_profile['wht_time'] = []
            dict_profile['other_time'] = []
            dict_profile['reproj_time_inv'] = []
            dict_profile['reproj_time_for'] = []
            dict_in['profiling'] = dict_profile
            t0 = time.time()
        ####################
        ##Begin Iterations##
        ####################
        for n in np.arange(self.int_iterations):
            ####################
            ###Landweber Step###
            ####################
            twoft_0 = time.time()
            H.set_output_fourier(True)  #force Fourier output to reduce ffts
            if self.input_complex:
                f_resid = y_hat - H * x_n  #Landweber difference
            else:
                f_resid = ifftn(y_hat - H * x_n)
                H.set_output_fourier(False)
            twoft_1 = time.time()
            if self.input_complex:
                HtHf = (~H) * f_resid
                w_resid = [W * (HtHf).real, W * (HtHf).imag]
            else:
                w_resid = [W * ((~H) * f_resid)]
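            #w_resid now holds W*H^T*(y - H*x_n), the wavelet-domain Landweber
            #residual (split into real and imaginary channels when complex)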
            wht = time.time()
            if self.profile:
                dict_profile['twoft_time'].append(twoft_1 - twoft_0)
                dict_profile['wht_time'].append(wht - twoft_1)
            ########################
            ######Convex Nu#########
            #####Ord/HMT Epsilon####
            ########################
            if self.ordepsilon:
                if n == 0:
                    prevepsilon = epsilon[0]
                else:
                    prevepsilon = epsilon[n - 1]
                epsilon[n] = self.get_ord_epsilon(w_n[0], prevepsilon,
                                                  self.percentiles[n])
                dict_in['epsilon_sq'] = epsilon[n]**2
            if self.convexnu:
                nu[n] = self.get_convex_nu(w_n[0], epsilon[n]**2,
                                           np.min(self.alpha))
                dict_in['nu_sq'] = nu[n]**2

            ###############################################
            ###Sparse Penalty-Specific Thresholding Step###
            ###############################################
            if self.str_sparse_pen == 'l0rl2_group':
                #S_hat_n, w_hat_n, and w_bar_n (eqs 11, 19, and 13)
                self.update_duplicates(dict_in, nu[n], epsilon[n], tau_sq,
                                       tau_sq_dia)
                w_bar_n = dict_in['w_bar_n']
                ls_w_hat_n = dict_in['ls_w_hat_n']

            #####################################################
            #Subband-adaptive subband update of precision matrix#
            #####################################################
            if (self.str_sparse_pen[0:5] == 'l0rl2'
                    and self.str_sparse_pen[-5:] != 'bivar'):
                if self.str_sparse_pen == 'l0rl2_group':
                    S0_n = nsum(
                        [nabs(w_n[ix].ary_lowpass)**2 for ix in w_n_it],
                        axis=0) / g_i + epsilon[n]**2
                    S0_n = 1.0 / S0_n
                else:
                    if self.hmt:
                        S_n_prev = S_n * 1.0
                        S_n.set_subband(
                            0, (1.0 /
                                ((1.0 / g_i) * nabs(w_n[0].get_subband(0))**2 +
                                 (epsilon[n]**2))))

                        for s in xrange(w_n[0].int_subbands - 1, 0, -1):
                            sigma_sq_parent_us = nabs(
                                w_n[0].get_upsampled_parent(s))**2
                            s_parent_sq = 1.0 / (
                                (2.0**(-2.25)) *
                                (1.0 / g_i * sigma_sq_parent_us))
                            S_n.set_subband(s, s_parent_sq)

                    else:
                        S_n = (sum([w_n[ix_].energy()
                                    for ix_ in w_n_it]) / g_i +
                               epsilon[n]**2).invert()
            elif (self.str_sparse_pen[0:5] == 'vbmm'
                  and self.str_sparse_pen[-5:] != 'hmt'):
                cplx_norm = 1.0 + self.input_complex
                S_n = ((g_i + 2.0 * p_a) *
                       (sum([w_n[ix_].energy()
                             for ix_ in w_n_it]) / cplx_norm + sigma_n +
                        2.0 * b_n).invert())
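                #NOTE: 's' below is not set in this branch; it carries over
                #from an earlier subband loop, which looks unintended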
                b_n = (p_k + p_a) * (S_n.get_subband(s) + p_theta).invert()
                sigma_n = (1.0 / nu[n]**2 * alpha[s] + S_n).invert()

            else:
                #iterating through subbands is necessary, coarse to fine
                for s in xrange(w_n[0].int_subbands - 1, -1, -1):
                    #Sendur Selesnick BSWLVE paper
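                    #bivariate shrinkage couples each coefficient with its
                    #parent: sqrt(|w|^2 + |w_parent|^2) is thresholded by
                    #sqrt(3)*sigma_n^2/sigma estimated from the neighborhood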
                    if self.str_sparse_pen == 'l0rl2_bivar':
                        if s > 0:
                            s_parent_us = nabs(
                                w_n[0].get_upsampled_parent(s))**2
                            s_child = nabs(w_n[0].get_subband(s))**2
                            yi, yi_mask = su.get_neighborhoods(s_child,
                                                               1)  #eq 8
                            s_child_norm = sqrt(s_parent_us + s_child)
                            sigsq_y = np.sum(yi, axis=yi.ndim - 1) / np.sum(
                                yi_mask, axis=yi.ndim - 1)  #still eq 8...
                            sig = sqrt(np.maximum(sigsq_y - sigsq_n, 0))
                            w_tilde.set_subband(s, sqrt3 * sigsq_n /
                                                sig)  #the thresholding fn
                            thresh = np.maximum(
                                s_child_norm - w_tilde.get_subband(s),
                                0) / s_child_norm  #eq 5
                            #update with the bivariate-thresholded
                            #coefficients on every other iteration
                            if np.mod(n, 2) == 0:
                                S_n.set_subband(
                                    s,
                                    (1.0 /
                                     ((1.0 / g_i) *
                                      nabs(thresh * w_n[0].get_subband(s))**2 +
                                      (epsilon[n]**2))))
                            else:
                                S_n.set_subband(
                                    s, (1.0 / ((1.0 / g_i) *
                                               nabs(w_n[0].get_subband(s))**2 +
                                               (epsilon[n]**2))))
                        else:
                            S_n.set_subband(s, (1.0 / (
                                (1.0 / g_i) * nabs(w_n[0].get_subband(s))**2 +
                                epsilon[n]**2)))

                    elif self.str_sparse_pen == 'vbmm_hmt':  #vbmm with the HMT prior
                        if n == 0:
                            sigma_n = 0
                        else:
                            sigma_n = (1.0 / nu[n]**2 * alpha[s] +
                                       S_n.get_subband(s))**(-1)
                        if s > 0:
                            w_parent_us = w_n[0].get_upsampled_parent(s)
                            alpha_dec = 2.25
                            if s > S_n.int_orientations:
                                s_child = S_n.subband_group_sum(
                                    s - S_n.int_orientations, 'children')
                                b_child = b_n.subband_group_sum(
                                    s - S_n.int_orientations, 'children')
                            else:
                                s_child = 0
                                b_child = 0
                            if s < S_n.int_subbands - S_n.int_orientations:
                                ap = ary_a[s + S_n.int_orientations]
                            else:
                                ap = .5
                            w_en_avg = w_n[0].subband_group_sum(
                                s, 'parent_children')
                            S_n.set_subband(
                                s, (g_i + 2.0 * ary_a[s]) /
                                (nabs(w_n[0].get_subband(s))**2 + sigma_n +
                                 2.0 * b_n.get_subband(s)))
                            b_n.set_subband(s, ary_a[s] * w_en_avg)
                        else:  #no parents, so generate fixed-param gammas
                            S_n.set_subband(
                                s, (g_i + 2.0 * ary_a[s]) /
                                (nabs(w_n[0].get_subband(s))**2 + sigma_n +
                                 2.0 * b_n.get_subband(s)))
                            b_n.set_subband(s, (p_k + ary_a[s]) /
                                            (S_n.get_subband(s) + p_theta))
                    else:
                        raise ValueError('no such solver variant')
            #########################
            #Update current solution#
            #########################
            for s in xrange(w_n[0].int_subbands - 1, -1, -1):
                if self.input_poisson_corrupted:
                    if s == 0:
                        ary_p_var = w_y.ary_lowpass
                    else:
                        int_lev, int_ori = w_n[0].lev_ori_from_subband(s)
                        ary_p_var = w_y_scaling_coeffs[int_lev]
                        ary_p_var[ary_p_var <= 0] = 0
                if (self.str_sparse_pen == 'l0rl2_group'):
                    if s > 0:
                        for ix_ in w_n_it:
                            w_n[ix_].set_subband(
                                s,
                                (alpha[s] * w_n[ix_].get_subband(s) +
                                 w_resid[ix_].get_subband(s) +
                                 (tau_sq[s]) * w_bar_n[ix_].get_subband(s)) /
                                (alpha[s] + tau_sq[s]))
                    else:  #a standard msist update for the lowpass coeffs
                        for ix_ in w_n_it:
                            w_n[ix_].set_subband(
                                s, (alpha[s] * w_n[ix_].get_subband(s) +
                                    w_resid[ix_].get_subband(s)) /
                                (alpha[s] + (nu[n]**2) * S0_n))
                else:
                    for ix_ in w_n_it:
                        w_n[ix_].set_subband(
                            s, (alpha[s] * w_n[ix_].get_subband(s) +
                                w_resid[ix_].get_subband(s)) /
                            (alpha[s] +
                             (nu[n]**2 + self.sc_factor * ary_p_var) *
                             S_n.get_subband(s)))
                #end updating subbands

            #############################################
            ##Solution Domain Projection and Operations##
            #############################################
            tother = time.time()
            if self.input_complex:
                x_n = np.asfarray(~W * w_n[0], 'complex128')
                x_n += 1j * np.asfarray(~W * w_n[1], 'complex128')
                m_n = nabs(x_n)
                theta_n = angle(x_n)
                if self.input_phase_encoded:  #need to apply boundary conditions for phase encoded velocity
                    #the following isn't part of the documented algorithm
                    #it only needs to be executed at the end to fix
                    #phase wrapping in very high dynamic-phase regions
                    theta_n = su.phase_unwrap(angle(x_n),
                                              dict_in['dict_global_lims'],
                                              dict_in['ls_local_lim_secs'])
                    #apply the mask each iteration to the phase (and
                    #optionally the magnitude) estimates
                    if self.get_val('iterationmask', True):
                        theta_n *= dict_in['mask']
                        if self.get_val('magnitudemask', True, 1):
                            m_n *= dict_in['mask']  #'total' masking
                    x_n = m_n * exp(1j * theta_n)
                dict_in['theta_n'] = theta_n
                dict_in['magnitude_n'] = m_n
            else:
                x_n = ~W * w_n[0]
            tinvdwt = time.time()
            #implicit convolution operator is used, so crop and repad
            if H.str_object_name == 'Blur' and H.lgc_even_fft:
                x_n = su.crop_center(x_n, dict_in['y'].shape)
            if self.input_poisson_corrupted and self.spatial_threshold:
                x_n[x_n < self.spatial_threshold_val] = 0.0

            #finished spatial domain operations on this iteration, store
            dict_in['x_n'] = x_n
            # store "residuals"
            dict_in['resid_n'] = x - x_n
            print "resid min " + str(np.round(np.min(dict_in['resid_n']), 2))
            print "resid max " + str(np.round(np.max(dict_in['resid_n']), 2))

            if H.str_object_name == 'Blur' and H.lgc_even_fft:
                x_n = su.pad_center(x_n, dict_in['x_0'].shape)

            #############################
            #Wavelet Domain Reprojection#
            #############################
            if self.profile:
                dict_profile['other_time'].append(tother - wht)
            if self.input_complex:
                w_n = [W * x_n.real, W * x_n.imag]
            else:
                w_n = [W * x_n]
            tforwardwt = time.time()
            if self.profile:
                dict_profile['reproj_time_inv'].append(tinvdwt - tother)
                dict_profile['reproj_time_for'].append(tforwardwt - tinvdwt)
            if self.str_sparse_pen[:11] == 'l0rl2_group':
                ls_w_hat_n = [[
                    ls_w_hat_n[ix_][j] * ls_S_hat_sup[j] + w_bar_n[ix_] *
                    ((ls_S_hat_sup[j] + (-1)) * (-1)) for j in dup_it
                ] for ix_ in w_n_it]  #fill in the gaps with w_bar_n
                w_bar_n = [W * ((~W) * w_bar_n[ix_]) for ix_ in w_n_it]
                ls_w_hat_n = [[
                    W * ((~W) * w_hat_n) for w_hat_n in ls_w_hat_n[ix_]
                ] for ix_ in w_n_it]
                dict_in['w_bar_n'] = w_bar_n
                dict_in['ls_w_hat_n'] = ls_w_hat_n
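                #NOTE: tau_end is only defined when tau_start and tau_rate are
                #both nonzero (see the initialization above), so this guard
                #assumes that configuration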
                if tau_rate != 0 and not np.any(tau > tau_end):
                    tau_sq_dia = tau_rate * tau_sq_dia
                    tau = np.sqrt(tau_rate) * tau
            dict_in['w_n'] = w_n
            dict_in['S_n'] = S_n
            ################
            #Update Results#
            ################
            self.results.update(dict_in)
            print 'Finished itn: n=' + str(n + 1)
            # if self.str_sparse_pen[:11] == 'l0rl2_group' and n==150: #an interesting experiment for cs..
            #     self.str_sparse_pen = 'l0rl2'

        return dict_in
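
The per-subband update in the main loop above reduces to a damped shrinkage step, w_s <- (alpha_s*w_s + resid_s) / (alpha_s + nu**2*S_s), ignoring the optional Poisson variance term. The following minimal NumPy sketch shows that arithmetic on plain arrays; the name msist_subband_update and the toy parameter values are illustrative assumptions, not part of the solver above.

import numpy as np

def msist_subband_update(w_s, w_resid_s, alpha_s, nu_sq, S_s):
    #convex combination of the current coefficients and the Landweber
    #residual, damped by the noise-weighted diagonal precision nu**2 * S
    return (alpha_s * w_s + w_resid_s) / (alpha_s + nu_sq * S_s)

#toy usage: random coefficients with an energy-based precision, cf. S_n above
w_s = np.random.randn(64)
w_resid_s = 0.1 * np.random.randn(64)
S_s = 1.0 / (np.abs(w_s)**2 + 1e-3)
w_next = msist_subband_update(w_s, w_resid_s, alpha_s=2.0, nu_sq=0.5, S_s=S_s)
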
Example #28
0
    def preprocess(self, dict_in):
        """Loads observation model parameters into a dictionary, 
        performs the forward model and provides an initial solution.

        Args:
        dict_in (dict): Dictionary which must include the following members:
            'x' (ndarray): The 'ground truth' input signal to be modified.
        """
        #build the preprocessing parameters
        if (self.str_type == 'brainwebmri'):
            #need to pad/crop the input data for wavelet processing
            swap_axes = self.get_val('swapaxes', True)
            if swap_axes.__class__.__name__ == 'ndarray':
                dict_in['x'] = dict_in['x'].swapaxes(swap_axes[0],
                                                     swap_axes[1])
            input_shape = dict_in['x'].shape

            #cropping
            new_shape = self.get_val('newshape', True)
            if new_shape.__class__.__name__ == 'ndarray':
                new_shape = tuple(new_shape)
            #figure out what to crop, if anything
            if np.any(new_shape < input_shape):
                crop_shape = np.min(np.vstack((new_shape, input_shape)),
                                    axis=0)
                dict_in['x'] = crop_center(dict_in['x'], crop_shape)
            else:
                crop_shape = input_shape
            #padding
            if np.any(new_shape > crop_shape):
                pad_shape = np.max(np.vstack((new_shape, crop_shape)), axis=0)
                dict_in['x'] = pad_center(dict_in['x'], pad_shape)
        # elif (self.str_type == 'superresolution'):
        #     #need to crop edge of image to make results compatible with the literature

        elif (self.str_type == 'phasevelocity'):
            mask_sec_in = self.get_val('masksectioninput', False)
            bmask_sec_in = self.get_val('boundarymasksectioninput', False)
            ls_local_lim_sec_in = self.get_val('vcorrects', False)
            if ls_local_lim_sec_in.__class__.__name__ == 'str' and ls_local_lim_sec_in:
                ls_local_lim_sec_in = [ls_local_lim_sec_in]
            ls_local_lim_secs = []
            if ls_local_lim_sec_in:
                ls_local_lim_secs = [
                    sf.create_section(self.get_params(), local_lim_sec_in)
                    for local_lim_sec_in in ls_local_lim_sec_in
                ]
                ls_local_lim_secs = [{
                    'phaselowerlimit':
                    local_lim.get_val('phaselowerlimit', True),
                    'phaseupperlimit':
                    local_lim.get_val('phaseupperlimit', True),
                    'regionupperleft':
                    local_lim.get_val('regionupperleft', True),
                    'regionlowerright':
                    local_lim.get_val('regionlowerright', True)
                } for local_lim in ls_local_lim_secs]
            #load the mask
            if mask_sec_in != '':
                sec_mask_in = sf.create_section(self.get_params(), mask_sec_in)
                dict_in['mask'] = np.asarray(sec_mask_in.read(dict_in, True),
                                             dtype='bool')
            else:
                dict_in['mask'] = True

            if bmask_sec_in != '':
                sec_bmask_in = sf.create_section(self.get_params(),
                                                 bmask_sec_in)
                dict_in['boundarymask'] = np.asarray(sec_bmask_in.read(
                    dict_in, True),
                                                     dtype='bool')
            else:
                dict_in['boundarymask'] = np.asarray(np.zeros(
                    dict_in['x'][:, :, 0].shape),
                                                     dtype='bool')

            #compute phase from lab measurement
            if self.get_val('nmracquisition', True):
                #The frame ordering determines in which direction to compute the
                #phase differences to obtain positive velocities

                frame_order = [0, 1]
                if self.get_val('reverseframeorder'):
                    frame_order = [1, 0]
                #Fully sampled fourier transform in order to extract phase data
                for frame in xrange(2):
                    dict_in['x'][:, :, frame] = fftn(
                        fftshift(dict_in['x'][:, :, frame]))
                if self.get_val('extrafftshift', True):
                    for frame in xrange(2):
                        dict_in['x'][:, :, frame] = fftshift(
                            dict_in['x'][:, :, frame])

                #Compute phase differences between the two frames
                diff_method = self.get_val('phasedifferencemethod')
                if diff_method == 'conjugateproduct':
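                    #angle(x1 * conj(x0)) yields the phase difference in one
                    #step, avoiding artifacts from subtracting two angles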
                    new_x = (dict_in['x'][:, :, frame_order[1]] *
                             conj(dict_in['x'][:, :, frame_order[0]]))
                    theta = angle(new_x)
                    theta += np.max(np.abs(theta))
                    magnitude = sqrt(abs(new_x))

                elif diff_method == 'subtraction':
                    theta = (angle(dict_in['x'][:, :, frame_order[1]]) -
                             angle(dict_in['x'][:, :, frame_order[0]]))
                    magnitude = 0.5 * (
                        np.abs(dict_in['x'][:, :, frame_order[0]]) +
                        np.abs(dict_in['x'][:, :, frame_order[1]]))
                new_x = magnitude * exp(1j * theta)

            else:  #synthetic data
                theta = angle(dict_in['x'])
                magnitude = nabs(dict_in['x'])

            #Do phase unwrapping. This works almost everywhere, except
            #in certain areas where the range of phases exceeds 2*pi.
            #These areas must also be unwrapped with special limits
            #which are determined from the data.
            dict_global_lims = {}
            dict_global_lims['lowerlimit'] = self.get_val(
                'phaselowerlimit', True)
            dict_global_lims['upperlimit'] = self.get_val(
                'phaseupperlimit', True)
            dict_global_lims['boundary_mask'] = dict_in['boundarymask']
            dict_global_lims['boundary_upperlimit'] = self.get_val(
                'boundaryphaseupperlimit', True)
            dict_global_lims['boundaryoverlapvcorrects'] = self.get_val(
                'boundaryoverlapvcorrects', True)

            theta = phase_unwrap(theta, dict_global_lims, ls_local_lim_secs)
            magnitude /= np.max(nabs(magnitude))
            dict_in['x'] = magnitude * exp(1j * theta)
            dict_in['theta'] = dict_in['mask'] * theta
            dict_in['magnitude'] = magnitude
            dict_in['dict_global_lims'] = dict_global_lims
            dict_in['ls_local_lim_secs'] = ls_local_lim_secs