def _draw_rotation_matrix(dim=3):
    if dim == 3:
        # Draw spherical angles from truncated normals and build an
        # orthonormal frame (u1, u2, u3) from them.
        phi = rtnorm(0, 2 * np.pi, mu=1, sigma=1, size=1, probabilities=False)
        theta = rtnorm(0, np.pi, mu=1, sigma=1, size=1, probabilities=False)
        u1 = np.array([math.sin(theta) * math.cos(phi),
                       math.sin(theta) * math.sin(phi),
                       math.cos(theta)])
        u2 = np.array([math.cos(theta) * math.cos(phi),
                       math.cos(theta) * math.sin(phi),
                       -math.sin(theta)])
        u3 = np.array([-math.sin(phi), math.cos(phi), 0])
        rotMatrix = np.column_stack((u1, u2, u3))
    else:
        # For other dimensions, orthonormalize a random matrix via QR and fix
        # the signs of R's diagonal so the result is a proper rotation.
        # a = np.random.normal(loc=0.0, scale=1.0, size=(dim, dim))
        a = 100 * np.random.rand(dim, dim) - 50
        q, r = np.linalg.qr(a, mode='complete')
        d = np.diag(np.sign(np.diag(r)))
        rotMatrix = np.dot(q, d)
    return rotMatrix
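A minimal sanity check (hypothetical usage, assuming numpy is imported as np and the rtnorm sampler used above is available): a rotation matrix should be orthonormal with determinant +1.

R = _draw_rotation_matrix(dim=3)
assert np.allclose(R @ R.T, np.eye(3))      # columns form an orthonormal basis
assert np.allclose(np.linalg.det(R), 1.0)   # proper rotation, not a reflection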
Example no. 2
def truncated_normal_draw(mu, tau):
    """Single draw from a normal with precision tau, truncated to [0, inf)."""
    if tau == 0.:
        return 0.
    sigma = numpy.float64(1.0) / math.sqrt(tau)
    d = rtnorm.rtnorm(a=0., b=numpy.inf, mu=mu, sigma=sigma)[0]
    #a,b = -mu/sigma, numpy.inf
    #d = truncnorm(a, b, loc=mu, scale=sigma).rvs(1)[0]
    # Guard against numerical failures of the sampler (inf or NaN draws)
    return d if (d >= 0. and numpy.isfinite(d)) else 0.


def TN_vector_draw(mus, taus):
    """Element-wise truncated-normal draws; entries with zero precision give 0."""
    draws = []
    for (mu, tau) in zip(mus, taus):
        if tau == 0.:
            draws.append(0.)
        else:
            sigma = numpy.float64(1.0) / numpy.sqrt(tau)
            d = rtnorm.rtnorm(a=0., b=numpy.inf, mu=mu, sigma=sigma)[0]
            draws.append(d if (d >= 0. and numpy.isfinite(d)) else 0.)
    '''
    draws = parallel_draw(self.mu,self.sigma,self.tau)
    '''
    return draws
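Hypothetical usage sketch for the two functions above (values are illustrative; rtnorm is the external truncated-normal module imported as in the snippet):

import numpy
mus = numpy.array([0.5, -1.0, 2.0])
taus = numpy.array([1.0, 4.0, 0.0])  # a zero precision short-circuits to 0.
print(truncated_normal_draw(0.5, 1.0))
print(TN_vector_draw(mus, taus))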
Example no. 4
def truncated_normal_vector_draw(mus, taus):
    """Element-wise truncated-normal draws; entries with zero precision give 0."""
    draws = []
    for (mu, tau) in zip(mus, taus):
        if tau == 0.:
            draws.append(0.)
        else:
            sigma = numpy.float64(1.0) / numpy.sqrt(tau)
            d = rtnorm.rtnorm(a=0., b=numpy.inf, mu=mu, sigma=sigma)[0]
            # Guard against numerical failures of the sampler (inf or NaN draws)
            draws.append(d if (d >= 0. and numpy.isfinite(d)) else 0.)
    '''
    draws = parallel_draw(self.mu,self.sigma,self.tau)
    '''
    return draws
Example no. 5
def test_version2(n=50000, silent=False):
    """
    Test the speed of rtnorm through cython,
    for both fixed limits a, b and variable limits.
    n - number of repetitions
    """
    rtnorm_obj = rtnorm()
    times = np.zeros(4)

    # amin / amax: module-level limits assumed to be defined elsewhere
    start = time.time()
    x = rtnorm_obj.sample(amin + 0.1, 1, size=n)  # @UnusedVariable
    times[0] = time.time() - start

    start = time.time()
    rtnorm_obj.sample(amax - 0.1, 4, size=n)
    times[1] = time.time() - start

    a_vec = 5 * np.random.randn(n)
    b_vec = a_vec + np.abs(5 * np.random.randn(n))

    start = time.time()
    rtnorm_obj.sample(a_vec, b_vec)
    times[2] = time.time() - start

    a_vec = 5 * np.random.randn(n)
    b_vec = a_vec + np.abs(5 * np.random.randn(n))
    mu = np.zeros(n)
    sigma = np.ones(n)
    start = time.time()
    rtnorm_obj.sample(a_vec, b_vec, mu, sigma, error_check=False)
    times[3] = time.time() - start

    if not silent:
        print('for rtnorm_v2:')
        for i, time_ in enumerate(times):
            print('test_{i} = {time:.5f}'.format(i=i, time=time_))

    return times
Example no. 6
def test_version2(n=50000, silent=False):
    """
		test the speed of rtnorm through cython
		test for both fixed limits a,b and variable limits
		n - number of rep
	"""
    rtnorm_obj = rtnorm()
    times = np.zeros(4)

    start = time.time()
    x = rtnorm_obj.sample(amin + 0.1, 1, size=n)  # @UnusedVariable
    times[0] = time.time() - start

    start = time.time()
    rtnorm_obj.sample(amax - 0.1, 4, size=n)
    times[1] = time.time() - start

    a_vec = 5 * np.random.randn(n)
    b_vec = a_vec + np.abs(5 * np.random.randn(n))

    start = time.time()
    rtnorm_obj.sample(a_vec, b_vec)
    times[2] = time.time() - start

    a_vec = 5 * np.random.randn(n)
    b_vec = a_vec + np.abs(5 * np.random.randn(n))
    mu = np.zeros(n)
    sigma = np.ones(n)
    start = time.time()
    rtnorm_obj.sample(a_vec, b_vec, mu, sigma, error_check=False)
    times[3] = time.time() - start

    if not silent:
        print('for rtnorm_v2:')
        for i, time_ in enumerate(times):
            print('test_{i} = {time:.5f}'.format(i=i, time=time_))

    return times
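For a rough reference point that does not require the cython extension, a comparable timing can be taken with scipy's truncated normal (a sketch, not part of the original test suite):

import time
import numpy as np
from scipy import stats

n = 50000
a_vec = 5 * np.random.randn(n)
b_vec = a_vec + np.abs(5 * np.random.randn(n))

start = time.time()
# scipy.stats.truncnorm takes standardized bounds relative to loc=0, scale=1
x = stats.truncnorm.rvs(a_vec, b_vec, size=n)
print('scipy truncnorm: {:.5f} s'.format(time.time() - start))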
Example no. 7
    def rvs(self):
        return rt.rtnorm(self.a, self.b, mu=self.loc, sigma=self.scale)[0]
Example no. 8
    def _draw_latent_scores(self, Z, R, Rlevels, W, n, plugin_marginal):
        """Draw scores from the latent variables of the copula.

        Arguments:
            Z {np.ndarray[np.double_t, ndim=2]} -- latent score matrix, one column per variable
            R {np.ndarray[np.int_t, ndim=2]} -- ordinal data coded as integer ranks (missing values are encoded as -1)
            Rlevels {np.ndarray[np.int_t, ndim=1]} -- number of rank levels for each variable

        Returns:
            np.ndarray[np.double_t, ndim=2] -- the updated latent score matrix Z
        """

        idx1 = np.zeros((Z.shape[1], ), dtype=bool)
        perm = np.random.permutation(Z.shape[1])

        sampler = rtnorm.rtnorm()

        # NOTE/REMINDER: nas are encoded as -1 in self.R
        has_nans = np.any(R == -1)
        var = 1 / W.diagonal()
        sd = np.sqrt(var)
        for _a in range(perm.shape[0]):
            i = perm[_a]

            # conditional mean of column i given the other columns, derived
            # from the precision matrix W
            idx1[i] = 1
            mu = Z[:, ~idx1] @ (W[~idx1, idx1] * (-var[i]))
            idx1[i] = 0

            if (not plugin_marginal[i]):
                # boolean mask per rank level; the trailing empty tuple is a
                # sentinel so that indices r-1 == -1 and r+1 == Rlevels[i]
                # both select "no observations"
                _ir_allr = [R[:, i] == r for r in range(Rlevels[i])] + [()]
                for r in range(Rlevels[i]):

                    # set lower bound to maximum Z score of values corresponding to the next lower rank
                    _ir = _ir_allr[r - 1]  #(R[:, i] == r-1) & (R[:, i] != -1)
                    if (not np.any(_ir)):
                        lb = -np.inf
                    else:
                        lb = np.max(Z[_ir, i])

                    # set upper bound to minimum Z score of values corresponding to the next higher rank
                    _ir = _ir_allr[r + 1]  #(R[:, i] == r+1) & (R[:, i] != -1)
                    if (not np.any(_ir)):
                        ub = np.inf
                    else:
                        ub = np.min(Z[_ir, i])

                    # set the scores of the values corresponding to the current rank to a draw from a truncated normal, bounded as shown above
                    _ir = _ir_allr[r]  #(R[:, i] == r) & (R[:, i] != -1)
                    # a, b = (lb - mu[_ir]) / sd[i], (ub - mu[_ir]) / sd[i]
                    # Z[_ir, i] = stats.truncnorm.rvs(a, b)
                    Z[_ir, i] = sampler.cyrtnormClass.sample(
                        a=np.array([lb]), b=np.array([ub]),
                        mu=mu[_ir], sigma=np.array([sd[i]]))

            if (has_nans):
                _ir = (R[:, i] == -1)
                Z[_ir, i] = np.random.normal(loc=mu[_ir], scale=sd[i])
            # update the scores..
            ranks = mstats.rankdata(Z[:, i])
            Z[:, i] = stats.norm.ppf(ranks / (n + 1))

        return Z
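The commented-out lines above hint at the scipy equivalent of the cython sampler call; a minimal sketch of that alternative (a hypothetical helper, not part of the original class):

import numpy as np
from scipy import stats

def truncnorm_draw(lb, ub, mu, sd):
    """Draw from N(mu, sd**2) truncated to [lb, ub]; mu may be an array."""
    a = (lb - mu) / sd  # standardized lower bound
    b = (ub - mu) / sd  # standardized upper bound
    return stats.truncnorm.rvs(a, b, loc=mu, scale=sd)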
Example no. 9
        # PERTURB THE MAXIMUM SLOPE ANGLE ACCORDING TO A PROBABILITY LAW

        # this expression defines a coefficient used for the direction of the next slope
        if (max_slope_prob < 1):

            # angle defining the direction of the new slope. When slope=0,
            # the possible angles for the next lobe are uniformly distributed.

            slopedeg = 180.0 * np.arctan(slope) / pi

            if (slopedeg > 0.0) and (max_slope_prob > 0):

                sigma = (1.0 - max_slope_prob) / max_slope_prob * (
                    90.0 - slopedeg) / slopedeg
                rand_angle_new = rtnorm.rtnorm(-180, 180, 0, sigma)

            else:

                rand = np.random.uniform(0, 1, size=1)
                rand_angle_new = 360.0 * np.abs(rand - 0.5)

            angle[i] = max_slope_angle + rand_angle_new

        else:

            angle[i] = max_slope_angle

        # factor for the lobe eccentricity
        aspect_ratio = min(max_aspect_ratio, 1.0 + aspect_ratio_coeff * slope)
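To see how the perturbation law above behaves, a small illustrative sketch (numbers are made up): for a fixed max_slope_prob, the spread sigma of the angle perturbation shrinks as the slope steepens, so steep slopes keep the lobe close to the maximum-slope direction.

import numpy as np

max_slope_prob = 0.7
for slope in (0.05, 0.2, 1.0, 5.0):
    slopedeg = 180.0 * np.arctan(slope) / np.pi
    sigma = (1.0 - max_slope_prob) / max_slope_prob * (90.0 - slopedeg) / slopedeg
    print('slope={:5.2f}  slopedeg={:6.2f}  sigma={:8.2f}'.format(slope, slopedeg, sigma))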
Example no. 10
    def setUp(self):

        self.rtnorm_obj = rtnorm()
Example no. 11
        slope = np.sqrt(np.square(Fx_test)+np.square(Fy_test))

        # PERTURB THE MAXIMUM SLOPE ANGLE ACCORDING TO A PROBABILITY LAW

        # this expression defines a coefficient used for the direction of the next slope
        if ( max_slope_prob < 1 ):

            # angle defining the direction of the new slope. When slope=0,
            # the possible angles for the next lobe are uniformly distributed.

            slopedeg = 180.0 * np.arctan(slope) / pi

            if ( slopedeg > 0.0 ) and ( max_slope_prob > 0 ):

                sigma = (1.0 - max_slope_prob ) / max_slope_prob * ( 90.0 - slopedeg ) / slopedeg
                rand_angle_new = rtnorm.rtnorm(-180,180,0,sigma)

            else:

                rand = np.random.uniform(0, 1, size=1)
                rand_angle_new = 360.0 * np.abs( rand-0.5 )

            angle[i] = max_slope_angle + rand_angle_new

        else:

            angle[i] = max_slope_angle

        # factor for the lobe eccentricity
Example no. 12
	def setUp(self):
		
		self.rtnorm_obj = rtnorm()
Example no. 13
    def __init__(self,
                 cube,
                 instrument=None,
                 mask=None,
                 variance=None,
                 model=SingleGaussianLineModel,
                 initial_parameters=None,
                 jump_amplitude=0.1,
                 gibbs_apriori_variance=None,
                 max_iterations=100000,
                 keep_one_in=1,
                 write_every=10000,
                 min_acceptance_rate=0.01):

        # Assert the sanity of provided parameters
        assert keep_one_in > 0, "keep_one_in= MUST be a positive integer"
        assert write_every > 0, "write_every= MUST be a positive integer"
        assert max_iterations > 0, "max_iterations= MUST be a positive integer"

        # Assign the logger to a property for convenience
        self.logger = logger

        # Set up the input data cube
        if isinstance(cube, basestring):
            cube = Cube.from_fits(cube)
        if not isinstance(cube, Cube):
            # try:  # todo: implement Cube.from_mpdaf() in hyperspectral first
            #     import mpdaf
            #     if isinstance(cube, mpdaf.obj.Cube):
            #         if variance is None:
            #             variance = Cube(data=cube.var)
            #         cube = Cube.from_mpdaf(cube)
            #     else:
            #         raise TypeError("Provided cube is not a HyperspectralCube "
            #                         "nor mpdaf's Cube")
            # except ImportError:
            #     raise TypeError("Provided cube is not a HyperspectralCube")
            raise TypeError("Provided cube is not a HyperspectralCube")
        if cube.is_empty():
            raise ValueError("Provided cube is empty")
        self.cube = cube

        # Ensure numerical stability
        signal_max = np.max(self.cube.data)
        assert signal_max > 1e-10, \
            "The input cube has data that is too small and will cause " \
            "numerical instability, infinite loops, or worse : bad science."

        # Collect information about the cube
        cube_shape = cube.data.shape
        cube_width = cube_shape[2]
        cube_height = cube_shape[1]
        cube_depth = cube_shape[0]

        # Spatial mask, so we don't iterate over all spaxels.
        # 1: valid, 0: invalid. By default, all spaxels are valid.
        if mask is None:
            mask = np.ones((cube_height, cube_width))
        if isinstance(mask, basestring):
            mask = getdata(mask)
        self.mask = mask

        # Flag invalid spaxels : we won't use them for iteration and summation.
        # By default, we flag as invalid all spaxels that have a NaN value
        # somewhere in the spectrum.
        self.mask[np.isnan(np.sum(self.cube.data, 0))] = 0

        # Count the number of spaxels we're going to parse
        spaxels_count = np.sum(self.mask)

        # Count the number of iterations we're actually going to save
        cnt_iterations = math.ceil(max_iterations / float(keep_one_in))

        # Set up the variance
        if variance is not None:
            if isinstance(variance, basestring):
                variance = Cube.from_fits(variance)
            if isinstance(variance, Cube):
                if variance.data is None:
                    self.logger.warning("Provided variance cube is empty")
                self.logger.info("Using provided variance : %s" % variance)
                variance_cube = variance.data
                self.logger.info(
                    "Replacing zeros in the variance cube by 1e12")
                variance_cube = np.where(variance_cube == 0.0, 1e12,
                                         variance_cube)
            elif isinstance(variance, np.ndarray):
                variance_cube = variance
            else:
                raise TypeError("Provided variance is not a Cube")
        else:
            # Clip data, and collect standard deviation sigma
            sub_data = np.copy(cube.data[2:-2, 2:-4, 2:4])
            _, clip_sigma, _ = median_clip(sub_data, 2.5)
            # Adjust sigma if it is zero, as we'll divide with it later on
            if clip_sigma == 0:
                clip_sigma = 1e-20  # arbitrarily low value
            variance_cube = np.ones(cube_shape) * clip_sigma**2

        # Save the variance cube and the standard deviation cube (error cube)
        if variance_cube.shape != cube_shape:
            raise ValueError("Provided variance has not the correct shape."
                             "Expected %s, got %s" %
                             (str(cube_shape), str(variance_cube.shape)))
        self.variance_cube = variance_cube  # sigmas ** 2
        self.error_cube = np.sqrt(self.variance_cube)  # sigmas

        # Set up the instrument
        if not isinstance(instrument, Instrument):
            raise TypeError("Provided instrument is not an Instrument")
        self.instrument = instrument

        # Set up the spread functions from the instrument
        self.lsf = self.instrument.lsf.as_vector(self.cube)
        self.fsf = self.instrument.fsf.as_image(self.cube)
        if self.fsf.shape[0] % 2 == 0 or self.fsf.shape[1] % 2 == 0:
            raise ValueError("FSF *must* be of odd dimensions")

        # Assert that the spread functions are normalized
        # fsfsum = np.nansum(self.fsf)
        # lsfsum = np.nansum(self.lsf)
        # assert fsfsum == 1.0, "FSF MUST be normalized, got %f." % fsfsum
        # assert lsfsum == 1.0, "LSF MUST be normalized, got %f." % lsfsum

        # Collect the shape of the FSF
        fh = self.fsf.shape[0]
        fw = self.fsf.shape[1]
        # The FSF *must* be odd-shaped, so these are integers
        fhh = (fh - 1) / 2  # FSF half height
        fhw = (fw - 1) / 2  # FSF half width

        # Set up the model to match against, from a class name or an instance
        if isinstance(model, LineModel):
            self.model = model
        else:
            self.model = model()
            if not isinstance(self.model, LineModel):
                raise TypeError("Provided model is not a LineModel")

        # Parameters boundaries
        min_boundaries = np.array(self.model.min_boundaries(self))
        max_boundaries = np.array(self.model.max_boundaries(self))

        self.logger.info("Min boundaries : %s" %
                         dict(zip(self.model.parameters(), min_boundaries)))
        self.logger.info("Max boundaries : %s" %
                         dict(zip(self.model.parameters(), max_boundaries)))

        # Assert that boundaries are consistent
        if (min_boundaries > max_boundaries).any():
            raise ValueError("Boundaries are inconsistent: min > max.")

        # Collect information about the model
        parameters_count = len(self.model.parameters())

        # Parameter jumping amplitude
        jump_amplitude = np.array(jump_amplitude)
        jumping_amplitude = np.ones(parameters_count) * jump_amplitude

        # Do we even need to do MH within Gibbs ?
        gpi = self.model.gibbs_parameter_index()
        do_gibbs = False if gpi is None else True

        if do_gibbs:
            self.logger.info("MH within Gibbs enabled for parameter `%s`." %
                             self.model.parameters()[gpi])
            # Make sure the Gibbs'd parameter has a jumping amplitude of 0
            jumping_amplitude[gpi] = 0
            # Compute the apriori variance if the user has not specified one
            if gibbs_apriori_variance is None:
                gibbs_apriori_variance = float(max_boundaries[gpi]**2)

        # Prepare the chain of parameters
        # One set of parameters per saved iteration, per spaxel
        try:
            self.chain = np.ndarray(
                (cnt_iterations, cube_height, cube_width, parameters_count))
        except MemoryError as e:
            self.logger.error(
                "Not enough RAM available for that many iterations. "
                "Use a higher value in the keep_one_in= parameter.")
            return

        # Prepare the chain of likelihoods
        likelihoods = np.ndarray((cnt_iterations, cube_height, cube_width))

        # Prepare the array of contributions
        # We only store "last iteration" contributions, or the RAM explodes.
        contributions = np.ndarray((
            cube_height,
            cube_width,  # spaxel coordinates
            cube_depth,
            cube_height,
            cube_width  # contribution data cube
        ))

        cur_iteration = 0  # Holder for the # of the current iteration
        cur_acceptance_rate = 0.

        # Initial parameters
        if initial_parameters is not None:
            # ... defined by the user
            if isinstance(initial_parameters, basestring):
                initial_parameters = np.load(initial_parameters)
            initial_parameters = np.array(initial_parameters)
            # ... so we need a sanity check
            ip_shape = initial_parameters.shape
            if ip_shape[0] != cube_height or ip_shape[1] != cube_width:
                raise ValueError(
                    "Initial params MUST have (%d, %d) shape, got (%d, %d)." %
                    (cube_height, cube_width, ip_shape[0], ip_shape[1]))
            # All is OK, set up the chain with the provided parameters
            self.chain[0] = initial_parameters
        else:
            # ... picked at random between boundaries
            for (y, x) in self.spaxel_iterator():
                p_new = \
                    min_boundaries + (max_boundaries - min_boundaries) * \
                    np.random.rand(parameters_count)
                self.chain[0][y][x] = p_new

        # Initial simulation, zeroes everywhere
        sim = np.zeros_like(cube.data)

        self.logger.info("Iteration #1")

        lsf_fft = None  # memoization holder for performance
        for (y, x) in self.spaxel_iterator():

            contribution, lsf_fft = self.contribution_of_spaxel(
                x,
                y,
                self.chain[0][y][x],
                cube_width,
                cube_height,
                cube_depth,
                self.fsf,
                self.lsf,
                lsf_fft=lsf_fft)

            sim = sim + contribution
            contributions[y, x, :, :, :] = contribution

        # Initial error/difference between simulation and data
        err_old = cube.data - sim
        cur_iteration += 1

        # Holds the current parameters
        parameters = self.chain[0].copy()

        # Accepted iterations counter (we accepted the whole first iteration)
        accepted_count = spaxels_count

        # Loop as many times as specified, as long as the acceptance is OK
        while \
                cur_iteration < max_iterations \
                and \
                (
                    cur_acceptance_rate > min_acceptance_rate or
                    cur_acceptance_rate == 0.
                ):

            # Will we save that iteration in the chain ?
            should_save_iteration = cur_iteration % keep_one_in == 0

            # Acceptance rate
            max_accepted_count = spaxels_count * cur_iteration
            if max_accepted_count > 0:
                cur_acceptance_rate = \
                    float(accepted_count) / float(max_accepted_count)

            self.logger.info(
                "Iteration #%d / %d, %2.0f%%" %
                (cur_iteration + 1, max_iterations, 100 * cur_acceptance_rate))

            # Loop through all spaxels
            for (y, x) in self.spaxel_iterator():

                # Compute a new set of parameters for this spaxel
                p_old = np.array(parameters[y][x].tolist())
                p_new = self.jump_from(p_old, jumping_amplitude)

                # Now, post-process the parameters, if the model requires it to
                self.model.post_jump(self, p_old, p_new)

                # Check if new parameters are within the boundaries
                # It happens quite often that parameters are out of boundaries,
                # so we do not log anything because it slows the script a lot.
                out_of_bounds = False
                too_low = np.array(p_new < min_boundaries)
                too_high = np.array(p_new > max_boundaries)
                if too_low.any() or too_high.any():
                    # print "New proposed parameters are out of boundaries."
                    out_of_bounds = True
                    if not do_gibbs:
                        i = cur_iteration / keep_one_in
                        self.chain[i][y][x] = p_old
                        continue

                # Compute the contribution of the new parameters
                contribution, lsf_fft = self.contribution_of_spaxel(
                    x,
                    y,
                    p_new,
                    cube_width,
                    cube_height,
                    cube_depth,
                    self.fsf,
                    self.lsf,
                    lsf_fft=lsf_fft)

                # Remove contribution of parameters of previous iteration
                # This gives the residual of the contribution of the spectral
                # lines for each spaxel, except this one.
                ul = err_old + contributions[y, x]
                # Add contribution of new parameters
                err_new = ul - contribution

                # Compute the limits of the affected spatial section, as the
                # contribution of one pixel is at most of the size of the FSF,
                # and may be less if we're close to the borders.
                y_min = max(y - fhh, 0)
                y_max = min(y + fhh + 1, cube_height)
                x_min = max(x - fhw, 0)
                x_max = min(x + fhw + 1, cube_width)

                # Actual acceptance ratio
                # Optimized by computing only around the spatial area that was
                # modified, aka. the area of the FSF around our current spaxel.
                err_new_part = err_new[:, y_min:y_max, x_min:x_max]
                err_old_part = err_old[:, y_min:y_max, x_min:x_max]

                # Extract the variance for this part of the cube
                var_part = self.variance_cube[:, y_min:y_max, x_min:x_max]

                # Two sums of small arrays (shaped at most like the FSF) is
                # faster than one sum of a big array (shaped like the cube).
                ar_part_old = 0.5 * bn.nansum(err_old_part**2 / var_part)
                ar_part_new = 0.5 * bn.nansum(err_new_part**2 / var_part)

                cur_acceptance = ar_part_old - ar_part_new

                # Store the likelihood ratio for this iteration and spaxel,
                # which is the NLL: the negated log-likelihood
                if should_save_iteration:
                    i = cur_iteration / keep_one_in
                    likelihoods[i][y][x] = cur_acceptance

                # Minimum acceptance ratio, picked randomly between -∞ and 0
                min_acceptance = log(np.random.rand())

                # Save new parameters only if acceptance ratio is acceptable
                if min_acceptance < cur_acceptance and not out_of_bounds:
                    contributions[y, x, :, :, :] = contribution
                    accepted_count += 1
                    err_old = err_new
                    p_end = p_new.copy()
                else:
                    # Otherwise the new parameters are the same as the old ones
                    p_end = p_old.copy()

                # Save the parameters
                parameters[y][x] = p_end
                if should_save_iteration:
                    i = cur_iteration / keep_one_in
                    self.chain[i][y][x] = p_end

                # Assert that the amplitude has not changed (it passes)
                # assert p_end[gpi] == parameters[cur_iteration-1][y][x][gpi]

                if do_gibbs:
                    # GIBBS
                    # Note: some of these maths are tied to the fact that the
                    # gibbsed value is the amplitude.

                    # Collect some values we'll need
                    gibbsed_value = parameters[y][x][gpi]
                    # Compute some subcubes we'll need
                    ul_part = ul[:, y_min:y_max, x_min:x_max]
                    # The contribution of the spatially and spectrally
                    # convolved line of unit flux at this spaxel.
                    ek_part = contributions[y, x, :, y_min:y_max, x_min:x_max]
                    # fixme
                    # Below, we assume the gibbsed value is the amplitude !
                    if gibbsed_value != 0:
                        # This should be of unit flux, hence the normalization.
                        ek_part = ek_part / gibbsed_value
                    else:
                        # The contribution of the previous iteration is empty,
                        # as the amplitude is zero. This usually only happens
                        # when you set the initial parameter of the amplitude
                        # to zero.
                        # In order to make the following math work, we need
                        # to have a non-null contribution, with amplitude 1.
                        # Therefore, we create a normalized contribution.
                        p_one = parameters[y][x].copy()
                        p_one[gpi] = 1.
                        contribution_one, _ = self.contribution_of_spaxel(
                            x,
                            y,
                            p_one,
                            cube_width,
                            cube_height,
                            cube_depth,
                            self.fsf,
                            self.lsf,
                            lsf_fft=None)
                        ek_part = contribution_one[:, y_min:y_max, x_min:x_max]

                    # Compute the characteristics of the gaussian aposteriori
                    ra = gibbs_apriori_variance  # apriori variance
                    ro = ra / (1. + ra * np.sum(ek_part**2 / var_part))
                    mu = ro * np.sum(ek_part * ul_part / var_part)
                    # Pick from a random truncated normal distribution
                    r = rtnorm(min_boundaries[gpi],
                               max_boundaries[gpi],
                               mu=mu,
                               sigma=np.sqrt(ro))[0]

                    # And now re-compute everything
                    p_end[gpi] = r

                    # It's costly to recompute the contribution !
                    #contribution_test, _ = self.contribution_of_spaxel(
                    #    x, y, p_end,
                    #    cube_width, cube_height, cube_depth,
                    #    self.fsf, self.lsf, lsf_fft=None
                    #)
                    # We can instead use the subcubes we already computed
                    contribution = np.zeros(cube.shape)
                    contribution[:, y_min:y_max, x_min:x_max] = ek_part * r

                    err_new = ul - contribution

                    # And, finally, write it
                    contributions[y, x, :, :, :] = contribution
                    err_old = err_new
                    parameters[y][x] = p_end
                    if should_save_iteration:
                        i = cur_iteration / keep_one_in
                        self.chain[i][y][x] = p_end

            # We compute the error based on the error of the previous iteration
            # and therefore, due to numerical instability, small errors slowly
            # creep in. To squash them, we sometimes recompute a "fresh" error.
            # The error creep is usually in the 1e-14 order.
            if cur_iteration % 1000 == 0:
                err_dbg = self._compute_error_in_one_step(
                    self.cube.data, parameters, self.fsf, self.lsf)
                # assert np.allclose(err_dbg, err_old, atol=0., rtol=1e-06)
                # diff = np.abs(err_dbg - err_old)
                # print "Corrected error creep of ", np.amax(diff)
                err_old = err_dbg

            # Prepare data for the next iteration
            cur_iteration += 1

        # Output
        self.likelihoods = likelihoods
        self.parameters = self.extract_parameters()
        self.convolved_cube = Cube(data=self.simulate_convolved(
            cube_shape, self.parameters),
                                   meta=self.cube.meta)
        self.clean_cube = Cube(data=self.simulate_clean(
            cube_shape, self.parameters),
                               meta=self.cube.meta)
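The Gibbs step above boils down to a conjugate Gaussian update for the amplitude, truncated to the parameter boundaries. A stripped-down sketch of that step (names mirror the code above; the rtnorm signature rtnorm(a, b, mu=..., sigma=...) is as used in the constructor):

import numpy as np

def gibbs_amplitude_draw(ek, ul, var, apriori_variance, lo, hi, rtnorm):
    """Redraw the amplitude given the unit-flux contribution ek, the residual
    ul and the pixel variances var, all restricted to the affected subcube."""
    ra = apriori_variance
    ro = ra / (1. + ra * np.sum(ek ** 2 / var))  # aposteriori variance
    mu = ro * np.sum(ek * ul / var)              # aposteriori mean
    return rtnorm(lo, hi, mu=mu, sigma=np.sqrt(ro))[0]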
Example no. 14
from rtnorm import rtnorm
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats


a = 8.3385004885 
b = 15.0602645553
n = 10000

rtnorm_obj = rtnorm()
X = rtnorm_obj.sample(a, b, size=n)
print(X)
x = np.linspace(a, 10, n)
plt.plot(x, stats.truncnorm.pdf(x, a, 10), 'r')
plt.plot(x, rtnorm_obj.probabilites(x, a, 10), 'g')
plt.hist(X, bins=100, density=True, alpha=.3)
plt.show()
Example no. 15
from rtnorm import rtnorm
import numpy as np
import matplotlib.pyplot as plt
from scipy import stats

a = 8.3385004885
b = 15.0602645553
n = 10000

rtnorm_obj = rtnorm()
X = rtnorm_obj.sample(a, b, size=n)
print(X)
x = np.linspace(a, 10, n)
plt.plot(x, stats.truncnorm.pdf(x, a, 10), 'r')
plt.plot(x, rtnorm_obj.probabilites(x, a, 10), 'g')
plt.hist(X, bins=100, density=True, alpha=.3)
plt.show()
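For a cross-check against scipy with explicitly standardized bounds (a sketch reusing a, b and X from the script above; here loc=0 and scale=1, so the bounds can be passed directly):

x_full = np.linspace(a, b, 500)
plt.plot(x_full, stats.truncnorm.pdf(x_full, a, b, loc=0, scale=1), 'k--')
plt.hist(X, bins=100, density=True, alpha=.3)
plt.show()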