Example #1
def terminal_conditions(data):
    """
    Create the terminal state of the CSM.

    Parameters
    ----------
    data : dict
        Problem data.
    """
    csm = data['csm']

    q_csm_f = tools.qmult(data['lm']['q'], csm.q_dock)
    if q_csm_f.dot(data['x0']['q']) < 0.:
        # This makes sure q_csm_f is the same as the destination quaternion for
        # SLERP, which will flip the sign of the destination quaternion in order
        # to take the shortest path
        q_csm_f *= -1.
    p_csm_f = (data['lm']['p'] + tools.rotate(csm.r_dock_lm, data['lm']['q']) -
               tools.rotate(csm.r_dock_csm, q_csm_f))
    w_csm_f = tools.rotate(data['lm']['w'], tools.qconj(csm.q_dock))
    # L_v_dock: docking impact velocity in LM frame
    L_v_dock = data['v_dock'] * tools.rotate(np.array([1., 0., 0.]),
                                             csm.q_dock)
    v_csm_f = tools.rotate(L_v_dock + np.cross(data['lm']['w'], csm.r_dock_lm),
                           data['lm']['q'])

    data['xf'] = dict(p=p_csm_f, v=v_csm_f, q=q_csm_f, w=w_csm_f)
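A minimal calling sketch for the function above, with a hypothetical `data` dictionary: only the keys the function actually reads are filled in, the values are placeholders, and `csm` is assumed to be an object from the same codebase exposing q_dock, r_dock_csm and r_dock_lm.

import numpy as np

data = dict(
    csm=csm,                                # assumed CSM object (q_dock, r_dock_csm, r_dock_lm)
    lm=dict(p=np.zeros(3),                  # LM position [m], placeholder
            q=np.array([1., 0., 0., 0.]),   # LM attitude quaternion, placeholder ordering
            w=np.zeros(3)),                 # LM angular rate [rad/s], placeholder
    x0=dict(q=np.array([1., 0., 0., 0.])),  # initial CSM attitude, placeholder
    v_dock=0.1)                             # docking approach speed, placeholder
terminal_conditions(data)
p_f, v_f, q_f, w_f = (data['xf'][k] for k in ('p', 'v', 'q', 'w'))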
Example #2
 def get_leg_absolute_position(self, position, orientation):
     # Compute the leg's absolute position from the robot's current position and
     # the leg's position expressed in its own frame.
     # Input:
     #   position, orientation : robot position and orientation in the absolute frame
     if self.side == 'right':
         return np.array(position) + np.array((tools.rotate(
             self.fix_position + self.relative_feet_position[0:2],
             orientation)).tolist() + [self.relative_feet_position[2]])
     else:
         return np.array(position) + np.array((tools.rotate(
             self.fix_position - self.relative_feet_position[0:2],
             orientation)).tolist() + [self.relative_feet_position[2]])
Example #3
 def get_leg_relative_position(self, position, orientation):
     # Inverse of get_leg_absolute_position: gives the leg's relative position from
     # its absolute position and the robot's absolute position.
     # Input:
     #   position, orientation : robot position and orientation in the absolute frame
     if self.side == 'right':
         return np.array((
             tools.rotate(self.absolute_feet_position -
                          position, [orientation[0], -orientation[1]]) -
             self.fix_position).tolist() + [self.absolute_feet_position[2]])
     else:
         return np.array((
             tools.rotate(self.absolute_feet_position -
                          position, [orientation[0], -orientation[1]]) -
             self.fix_position).tolist() + [self.absolute_feet_position[2]])
Example #4
        def dxdt(t, x, u):
            """
            Compute state time derivative using the unforced non-linear
            dynamics.

            Parameters
            ----------
            t : float
                The current time in the control interval (starting from zero).
            x : np.ndarray
                The current state.
            u : np.ndarray
                The array of pulse widths for each thruster.

            Returns
            -------
            : np.ndarray
                State time derivative.
            """
            input_on = [self.__thrust_signal(u[i], t) for i in range(self.M)]
            # velocity, quaternion, angular rate
            v, q, w = x[3:6], x[6:10], x[10:13]
            if normalize_q:
                q /= la.norm(q)  # re-normalize
            dpdt = v
            dvdt = self.minv * sum([
                tools.rotate(
                    self.thruster_vector[i] * self.thrust_max * input_on[i], q)
                for i in range(self.M)
            ])
            dqdt = 0.5 * tools.qmult(q, tools.qpure(w))
            dwdt = (
                sum([self.Jinv_rxF[i] * input_on[i] for i in range(self.M)]) -
                self.Jinv.dot(np.cross(w, self.J.dot(w))))
            return np.concatenate([dpdt, dvdt, dqdt, dwdt])
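The dqdt line above is the standard quaternion kinematics q_dot = 0.5 * q ⊗ (0, w). A self-contained numpy sketch of that identity, with an inline Hamilton product so it does not depend on the tools module (the [w, x, y, z] ordering here is an assumption of the sketch, not necessarily this codebase's convention):

import numpy as np

def qmult_wxyz(a, b):
    # Hamilton product for quaternions stored as [w, x, y, z]
    # (sketch helper, not tools.qmult from this codebase).
    aw, ax, ay, az = a
    bw, bx, by, bz = b
    return np.array([aw*bw - ax*bx - ay*by - az*bz,
                     aw*bx + ax*bw + ay*bz - az*by,
                     aw*by - ax*bz + ay*bw + az*bx,
                     aw*bz + ax*by - ay*bx + az*bw])

q = np.array([1., 0., 0., 0.])        # identity attitude
w = np.array([0., 0., 0.1])           # 0.1 rad/s about the body z axis
dqdt = 0.5 * qmult_wxyz(q, np.concatenate([[0.], w]))   # same formula as above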
Example #5
    def move_snake(self, *args):
        """
        Move the snake one step in the current direction.
        """
        if self.dr != [0, 0]:
            # add a new body part if one is pending
            if self.add_new_part:
                self.add_part()

            # store the current body positions before moving
            old_r = [tuple(part.pos) for part in self.snake_parts]
            for i, part in enumerate(self.snake_parts):
                if i == 0:
                    # move head
                    part.pos = Vector(part.pos) + self.dr
                    rotate(part, Vector(self.dr), Vector(self.dr))
                else:
                    part.pos = old_r[i-1]
                    rotate(part, Vector(self.snake_parts[i-1].pos)-Vector(part.pos),
                                 Vector(old_r[i-1]) - Vector(old_r[i]))

            # check snake head collisions
            self.check_collision(self.snake_parts[0])
Example #6
def get_aligned_cutout(map_params, image, l=None, cl=None, cl_noise=None):

    if cl_noise is None:
        l = np.arange(10000)
        cl = np.ones(max(l) + 1)
        cl_noise = np.zeros(max(l) + 1)

    _, dx, _, _ = map_params
    cutout = tools.central_cutout(map_params, image, 10)
    wiener_filter = tools.wiener_filter(l, cl, cl_noise)
    filtered_map = tools.convolve(image, l, wiener_filter, map_params)
    low_pass_filter = tools.low_pass_filter(l, 2000)
    filtered_map = tools.convolve(filtered_map, l, low_pass_filter, map_params)
    filtered_cutout = tools.central_cutout(map_params, filtered_map, 6)
    _, _, magnitude, angle = tools.gradient(filtered_cutout, dx)
    angle, magnitude_weight = np.median(angle), np.median(magnitude)
    cutout_aligned = tools.rotate(cutout, angle)
    cutout_aligned -= np.median(cutout_aligned)

    return cutout_aligned, magnitude_weight
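A hedged usage sketch for get_aligned_cutout, borrowing the [nx, dx, ny, dy] map convention and the power-spectrum call from Example #8 below; with no cl_noise given, the function falls back to a flat cl and zero noise before filtering.

nx, dx, ny, dy = 240, 0.25, 240, 0.25          # map convention as in Example #8
map_params = [nx, dx, ny, dy]
l, cl = CosmoCalc().cmb_power_spectrum()
image = tools.make_gaussian_realization(map_params, l, cl)
cutout_aligned, magnitude_weight = get_aligned_cutout(map_params, image)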
Example #7
def cmb_mock_data(map_params,
                  l,
                  cl,
                  cluster=None,
                  centroid_shift_value=0,
                  nber_ch=1,
                  cluster_corr_cutouts=None,
                  cl_extragal=None,
                  bl=None,
                  nl=None):

    nx, dx, ny, dy = map_params
    sim = tools.make_gaussian_realization(map_params, l, cl)

    if cluster is not None:
        M, c, z = cluster
        x_shift, y_shift = np.random.normal(
            loc=0.0, scale=centroid_shift_value), np.random.normal(
                loc=0.0, scale=centroid_shift_value)
        centroid_shift = [x_shift, y_shift]
        kappa_map = lensing.NFW(M, c, z, 1100).convergence_map(
            map_params, centroid_shift=centroid_shift)
        alpha_vec = lensing.deflection_from_convergence(map_params, kappa_map)
        sim = lensing.lens_map(map_params, sim, alpha_vec)

    sims_ch_arr = [np.copy(sim) for k in range(nber_ch)]

    if cluster_corr_cutouts is not None:
        if np.asarray(cluster_corr_cutouts).ndim == 3:
            cluster_corr_cutouts = [cluster_corr_cutouts]
        cluster_corr_cutout = cluster_corr_cutouts[0][0]
        nx_cutout, ny_cutout = cluster_corr_cutout.shape[:2]
        s, e = int((nx - nx_cutout) / 2), int((ny + ny_cutout) / 2)
        rand_sel = random.randint(0, len(cluster_corr_cutouts[0]) - 1)
        rand_ang = random.randint(-180, 180)
        for j in range(nber_ch):
            cluster_corr_cutout = tools.rotate(
                cluster_corr_cutouts[j][rand_sel], rand_ang)
            sims_ch_arr[j][s:e,
                           s:e] = sims_ch_arr[j][s:e,
                                                 s:e] + cluster_corr_cutout

    if cl_extragal is not None:
        if not isinstance(cl_extragal, list):
            cl_extragal = [cl_extragal]
        extragal_maps = tools.make_gaussian_realization(
            map_params, l, cl_extragal)
        for j in range(nber_ch):
            sims_ch_arr[j] += extragal_maps[j]

    if bl is not None:
        if not isinstance(bl, list):
            bl = [bl]
        for j in range(nber_ch):
            sims_ch_arr[j] = tools.convolve(sims_ch_arr[j],
                                            l,
                                            np.sqrt(bl[j]),
                                            map_params=map_params)

    if nl is not None:
        if not isinstance(nl, list):
            nl = [nl]
        for j in range(nber_ch):
            noise_map = tools.make_gaussian_realization(map_params, l, nl[j])
            sims_ch_arr[j] += noise_map

    if nber_ch == 1:
        return sims_ch_arr[0]

    return sims_ch_arr
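A hedged single-channel usage sketch: the cluster argument is unpacked as (M, c, z) at the top of the function, and the spectra calls mirror Example #8; all numeric values are placeholders.

nx, dx, ny, dy = 240, 0.25, 240, 0.25
map_params = [nx, dx, ny, dy]
l, cl = CosmoCalc().cmb_power_spectrum()
l, bl = exp.beam_power_spectrum(1.4)
l, nl = exp.white_noise_power_spectrum(2.0)
sim = cmb_mock_data(map_params, l, cl,
                    cluster=[2e14, 3, 1],      # M [Msun], concentration, redshift
                    bl=bl, nl=nl)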
Example #8
def cmb_test_data(nber_maps,
                  validation_analysis=False,
                  clus_position_analysis=False,
                  extragal_bias_analysis=False):
    nx, dx, ny, dy = 240, 0.25, 240, 0.25
    map_params = [nx, dx, ny, dy]
    l, cl = CosmoCalc().cmb_power_spectrum()
    l, bl = exp.beam_power_spectrum(1.4)
    l, nl = exp.white_noise_power_spectrum(2.0)

    if validation_analysis is True:
        sims_clus_2e14, sims_clus_6e14, sims_clus_10e14 = [], [], []
        kappa_map_2e14 = lensing.NFW(2e14, 3, 1,
                                     1100).convergence_map(map_params)
        kappa_map_6e14 = lensing.NFW(6e14, 3, 1,
                                     1100).convergence_map(map_params)
        kappa_map_10e14 = lensing.NFW(10e14, 3, 1,
                                      1100).convergence_map(map_params)
        alpha_vec_2e14 = lensing.deflection_from_convergence(
            map_params, kappa_map_2e14)
        alpha_vec_6e14 = lensing.deflection_from_convergence(
            map_params, kappa_map_6e14)
        alpha_vec_10e14 = lensing.deflection_from_convergence(
            map_params, kappa_map_10e14)
        for i in range(nber_maps):
            sim = tools.make_gaussian_realization(map_params, l, cl)
            sim_clus_2e14 = lensing.lens_map(map_params, sim, alpha_vec_2e14)
            sim_clus_6e14 = lensing.lens_map(map_params, sim, alpha_vec_6e14)
            sim_clus_10e14 = lensing.lens_map(map_params, sim, alpha_vec_10e14)
            sim_clus_2e14 = tools.convolve(sim_clus_2e14,
                                           l,
                                           np.sqrt(bl),
                                           map_params=map_params)
            sim_clus_6e14 = tools.convolve(sim_clus_6e14,
                                           l,
                                           np.sqrt(bl),
                                           map_params=map_params)
            sim_clus_10e14 = tools.convolve(sim_clus_10e14,
                                            l,
                                            np.sqrt(bl),
                                            map_params=map_params)
            noise_map = tools.make_gaussian_realization(map_params, l, nl)
            sim_clus_2e14 += noise_map
            sim_clus_6e14 += noise_map
            sim_clus_10e14 += noise_map
            sims_clus_2e14.append(sim_clus_2e14)
            sims_clus_6e14.append(sim_clus_6e14)
            sims_clus_10e14.append(sim_clus_10e14)
        return sims_clus_2e14, sims_clus_6e14, sims_clus_10e14

    if clus_position_analysis is True:
        sims_baseline, sims_centroid_shift = [], []
        kappa_map_6e14_baseline = lensing.NFW(6e14, 3, 1,
                                              1100).convergence_map(map_params)
        alpha_vec_6e14_baseline = lensing.deflection_from_convergence(
            map_params, kappa_map_6e14_baseline)
        for i in range(nber_maps):
            x_shift, y_shift = np.random.normal(
                loc=0.0, scale=0.5), np.random.normal(loc=0.0, scale=0.5)
            centroid_shift = [x_shift, y_shift]
            kappa_map_6e14_centroid_shift = lensing.NFW(
                6e14, 3, 1, 1100).convergence_map(map_params, centroid_shift)
            alpha_vec_6e14_centroid_shift = lensing.deflection_from_convergence(
                map_params, kappa_map_6e14_centroid_shift)
            sim = tools.make_gaussian_realization(map_params, l, cl)
            sim_baseline = lensing.lens_map(map_params, sim,
                                            alpha_vec_6e14_baseline)
            sim_centroid_shift = lensing.lens_map(
                map_params, sim, alpha_vec_6e14_centroid_shift)
            sim_baseline = tools.convolve(sim_baseline,
                                          l,
                                          np.sqrt(bl),
                                          map_params=map_params)
            sim_centroid_shift = tools.convolve(sim_centroid_shift,
                                                l,
                                                np.sqrt(bl),
                                                map_params=map_params)
            noise_map = tools.make_gaussian_realization(map_params, l, nl)
            sim_baseline += noise_map
            sim_centroid_shift += noise_map
            sims_baseline.append(sim_baseline)
            sims_centroid_shift.append(sim_centroid_shift)
        return sims_baseline, sims_centroid_shift

    if extragal_bias_analysis is True:
        sims_baseline, sims_tsz, sims_ksz, sims_tsz_ksz = [], [], [], []
        c500 = concentration.concentration(2e14, '500c', 0.7)
        M200c, _, c200c = mass_defs.changeMassDefinition(2e14,
                                                         c500,
                                                         0.7,
                                                         '500c',
                                                         '200c',
                                                         profile='nfw')
        kappa_map_M200c = lensing.NFW(M200c, c200c, 0.7,
                                      1100).convergence_map(map_params)
        alpha_vec_M200c = lensing.deflection_from_convergence(
            map_params, kappa_map_M200c)
        fname = '/Volumes/Extreme_SSD/codes/master_thesis/code/data/mdpl2_cutouts_for_tszksz_clus_detection_M1.7e+14to2.3e+14_z0.6to0.8_15320haloes_boxsize20.0am.npz'
        cutouts_dic = np.load(fname, allow_pickle=1,
                              encoding='latin1')['arr_0'].item()
        mass_z_key = list(cutouts_dic.keys())[0]
        cutouts = cutouts_dic[mass_z_key]
        scale_fac = fg.compton_y_to_delta_Tcmb(145, uK=True)
        tsz_cutouts, ksz_cutouts = [], []
        for kcntr, keyname in enumerate(cutouts):
            tsz_cutout = cutouts[keyname]['y'] * scale_fac
            tsz_cutouts.append(tsz_cutout)
            ksz_cutout = cutouts[keyname]['ksz'] * random.randrange(-1, 2, 2)
            ksz_cutouts.append(ksz_cutout)
        s, e = int((nx - 40) / 2), int((ny + 40) / 2)
        for i in range(nber_maps):
            sim = tools.make_gaussian_realization(map_params, l, cl)
            sim_M200c = lensing.lens_map(map_params, sim, alpha_vec_M200c)
            sim_baseline, sim_tsz, sim_ksz, sim_tsz_ksz = np.copy(
                sim_M200c), np.copy(sim_M200c), np.copy(sim_M200c), np.copy(
                    sim_M200c)
            tsz_cutout = tools.rotate(
                tsz_cutouts[random.randint(0,
                                           len(tsz_cutouts) - 1)],
                random.randint(-180, 180))
            ksz_cutout = tools.rotate(
                ksz_cutouts[random.randint(0,
                                           len(ksz_cutouts) - 1)],
                random.randint(-180, 180))
            tsz_ksz_cutout = tsz_cutout + ksz_cutout
            sim_tsz[s:e, s:e] = sim_tsz[s:e, s:e] + tsz_cutout
            sim_ksz[s:e, s:e] = sim_ksz[s:e, s:e] + ksz_cutout
            sim_tsz_ksz[s:e, s:e] = sim_tsz_ksz[s:e, s:e] + tsz_ksz_cutout
            sim_baseline = tools.convolve(sim_baseline,
                                          l,
                                          np.sqrt(bl),
                                          map_params=map_params)
            sim_tsz = tools.convolve(sim_tsz,
                                     l,
                                     np.sqrt(bl),
                                     map_params=map_params)
            sim_ksz = tools.convolve(sim_ksz,
                                     l,
                                     np.sqrt(bl),
                                     map_params=map_params)
            sim_tsz_ksz = tools.convolve(sim_tsz_ksz,
                                         l,
                                         np.sqrt(bl),
                                         map_params=map_params)
            noise_map = tools.make_gaussian_realization(map_params, l, nl)
            sim_baseline += noise_map
            sim_tsz += noise_map
            sim_ksz += noise_map
            sim_tsz_ksz += noise_map
            sims_baseline.append(sim_baseline)
            sims_tsz.append(sim_tsz)
            sims_ksz.append(sim_ksz)
            sims_tsz_ksz.append(sim_tsz_ksz)
        return sims_baseline, sims_tsz, sims_ksz, sims_tsz_ksz
Example #9
 def dzdt(z, t_ref, x_ref, u_ref):
     """
     The dynamics of the "concatenated" state z, which is used to compute
     the discrete-time LTV update matrices A, B and the residual r.
     
     Parameters
     ----------
     z : np.ndarray
         The current concatenated state.
     t_ref : float
         Reference time about which to linearize the non-linear dynamics.
     x_ref : np.ndarray
         Reference state about which to linearize the non-linear
         dynamics.
     u_ref : np.ndarray
         Reference input about which to linearize the non-linear
         dynamics.
     
     Returns
     -------
     : np.ndarray
         The concatenated state's time derivative.
     """
     # Extract needed terms
     Phi_w = self.dltv_zget(z, 'Phi_w')
     Phi_w_inv = la.inv(Phi_w)
     Phi_q = self.dltv_zget(z, 'Phi_q')
     Phi_q_inv = la.inv(Phi_q)
     Psi_r = self.dltv_zget(z, 'Psi_r')
     Psi_u = self.dltv_zget(z, 'Psi_u')
     Psi_q = self.dltv_zget(z, 'Psi_q')
     Psi_E = self.dltv_zget(z, 'Psi_E')
     Psi_Ew = self.dltv_zget(z, 'Psi_Ew')
     Psi_Er = self.dltv_zget(z, 'Psi_Er')
     Psi_vrp = self.dltv_zget(z, 'Psi_vrp')
     Psi_vrppp = self.dltv_zget(z, 'Psi_vrppp')
     Psi_vrpppp = self.dltv_zget(z, 'Psi_vrpppp')
     Psi_F = self.dltv_zget(z, 'Psi_F')
     Psi_vq = self.dltv_zget(z, 'Psi_vq')
     Psi_vqw = self.dltv_zget(z, 'Psi_vqw')
     Psi_vqu = self.dltv_zget(z, 'Psi_vqu')
     q_ref = x_ref[6:10]
     w_ref = x_ref[10:13]
     # Linearize about current propagated trajectory value
     A, r = self.linearize(q_ref, w_ref, u_ref, t_ref)
     # Compute time derivatives
     Phi_w_dot = A['ww'].dot(Phi_w)
     Psi_r_dot = Phi_w_inv.dot(r['w'])
     Psi_u_dot = Phi_w_inv
     Phi_q_dot = A['qq'].dot(Phi_q)
     E = Phi_q_inv.dot(A['qw']).dot(Phi_w)
     Psi_E_dot = E
     Psi_q_dot = Phi_q_inv.dot(r['q'])
     Psi_Er_dot = E.dot(Psi_r)
     Psi_Ew_dot = E.dot(Psi_u)
     Psi_vq_dot = A['vq'].dot(Phi_q)
     Psi_vqw_dot = Psi_vq_dot.dot(Psi_E)
     Psi_vqu_dot = Psi_vq_dot.dot(Psi_Ew)
     Psi_vrp_dot = -A['vq'].dot(q_ref)
     Psi_vrppp_dot = Psi_vq_dot.dot(Psi_q)
     Psi_vrpppp_dot = Psi_vq_dot.dot(Psi_Er)
     Psi_pvr_dot = Psi_vrp
     Psi_F_dot = np.column_stack([
         tools.rotate(self.thruster_vector[i] * self.thrust_max, q_ref)
         for i in range(self.M)
     ])
     Psi_prF_dot = Psi_F
     Psi_pvq_dot = Psi_vq
     Psi_pvrppp_dot = Psi_vrppp
     Psi_pw2_dot = Psi_vqw
     Psi_pEr2_dot = Psi_vrpppp
     Psi_pvqu_dot = Psi_vqu
     # Create the conglomerated state time derivative
     dzdt = np.empty(self.dltv_z0.shape)
     dzdt[self.dltv_map['Phi_w']['idx']] = Phi_w_dot.flatten()
     dzdt[self.dltv_map['Psi_r']['idx']] = Psi_r_dot.flatten()
     dzdt[self.dltv_map['Psi_u']['idx']] = Psi_u_dot.flatten()
     dzdt[self.dltv_map['Phi_q']['idx']] = Phi_q_dot.flatten()
     dzdt[self.dltv_map['Psi_E']['idx']] = Psi_E_dot.flatten()
     dzdt[self.dltv_map['Psi_q']['idx']] = Psi_q_dot.flatten()
     dzdt[self.dltv_map['Psi_Er']['idx']] = Psi_Er_dot.flatten()
     dzdt[self.dltv_map['Psi_Ew']['idx']] = Psi_Ew_dot.flatten()
     dzdt[self.dltv_map['Psi_vq']['idx']] = Psi_vq_dot.flatten()
     dzdt[self.dltv_map['Psi_vqw']['idx']] = Psi_vqw_dot.flatten()
     dzdt[self.dltv_map['Psi_vqu']['idx']] = Psi_vqu_dot.flatten()
     dzdt[self.dltv_map['Psi_vrp']['idx']] = Psi_vrp_dot.flatten()
     dzdt[self.dltv_map['Psi_vrppp']['idx']] = Psi_vrppp_dot.flatten()
     dzdt[self.dltv_map['Psi_vrpppp']['idx']] = Psi_vrpppp_dot.flatten()
     dzdt[self.dltv_map['Psi_pvr']['idx']] = Psi_pvr_dot.flatten()
     dzdt[self.dltv_map['Psi_F']['idx']] = Psi_F_dot.flatten()
     dzdt[self.dltv_map['Psi_prF']['idx']] = Psi_prF_dot.flatten()
     dzdt[self.dltv_map['Psi_pvq']['idx']] = Psi_pvq_dot.flatten()
     dzdt[self.dltv_map['Psi_pvrppp']['idx']] = Psi_pvrppp_dot.flatten()
     dzdt[self.dltv_map['Psi_pw2']['idx']] = Psi_pw2_dot.flatten()
     dzdt[self.dltv_map['Psi_pEr2']['idx']] = Psi_pEr2_dot.flatten()
     dzdt[self.dltv_map['Psi_pvqu']['idx']] = Psi_pvqu_dot.flatten()
     return dzdt
Example #10
    def dltv(self, t_star, x_star, u_star, **odeopts):
        """
        Compute discrete-time LTV system using impulse thrusts and the
        trajectory {t_star,x_star,u_star} as reference.

        Parameters
        ----------
        t_star : np.ndarray
            The temporal nodes of the reference trajectory.
        x_star : np.ndarray
            The states of the reference trajectory, as a matrix whose row k is
            the state at time t_star[k].
        u_star : np.ndarray
            The inputs of the reference trajectory, as a matrix whose row k is
            the input at time t_star[k].
        **odeopts : keyword arguments
            Arguments to pass to tools.odeint.

        Returns
        -------
        Ad : dict
            Discrete update A[k] matrices.
        Bd : dict
            Discrete update B[k] matrices.
        rd : dict
            Discrete update r[k] residual terms.
        x_nl : list
            List of trajectories obtained from propagating the non-linear
            dynamics using u_star, with re-setting for each control (silent
            time) segment. Element k of the list is the k-th control segment.
        """
        def dzdt(z, t_ref, x_ref, u_ref):
            """
            The dynamics of the "concatenated" state z, which is used to compute
            the discrete-time LTV update matrices A, B and the residual r.
            
            Parameters
            ----------
            z : np.ndarray
                The current concatenated state.
            t_ref : float
                Reference time about which to linearize the non-linear dynamics.
            x_ref : np.ndarray
                Reference state about which to linearize the non-linear
                dynamics.
            u_ref : np.ndarray
                Reference input about which to linearize the non-linear
                dynamics.
            
            Returns
            -------
            : np.ndarray
                The concatenated state's time derivative.
            """
            # Extract needed terms
            Phi_w = self.dltv_zget(z, 'Phi_w')
            Phi_w_inv = la.inv(Phi_w)
            Phi_q = self.dltv_zget(z, 'Phi_q')
            Phi_q_inv = la.inv(Phi_q)
            Psi_r = self.dltv_zget(z, 'Psi_r')
            Psi_u = self.dltv_zget(z, 'Psi_u')
            Psi_q = self.dltv_zget(z, 'Psi_q')
            Psi_E = self.dltv_zget(z, 'Psi_E')
            Psi_Ew = self.dltv_zget(z, 'Psi_Ew')
            Psi_Er = self.dltv_zget(z, 'Psi_Er')
            Psi_vrp = self.dltv_zget(z, 'Psi_vrp')
            Psi_vrppp = self.dltv_zget(z, 'Psi_vrppp')
            Psi_vrpppp = self.dltv_zget(z, 'Psi_vrpppp')
            Psi_F = self.dltv_zget(z, 'Psi_F')
            Psi_vq = self.dltv_zget(z, 'Psi_vq')
            Psi_vqw = self.dltv_zget(z, 'Psi_vqw')
            Psi_vqu = self.dltv_zget(z, 'Psi_vqu')
            q_ref = x_ref[6:10]
            w_ref = x_ref[10:13]
            # Linearize about current propagated trajectory value
            A, r = self.linearize(q_ref, w_ref, u_ref, t_ref)
            # Compute time derivatives
            Phi_w_dot = A['ww'].dot(Phi_w)
            Psi_r_dot = Phi_w_inv.dot(r['w'])
            Psi_u_dot = Phi_w_inv
            Phi_q_dot = A['qq'].dot(Phi_q)
            E = Phi_q_inv.dot(A['qw']).dot(Phi_w)
            Psi_E_dot = E
            Psi_q_dot = Phi_q_inv.dot(r['q'])
            Psi_Er_dot = E.dot(Psi_r)
            Psi_Ew_dot = E.dot(Psi_u)
            Psi_vq_dot = A['vq'].dot(Phi_q)
            Psi_vqw_dot = Psi_vq_dot.dot(Psi_E)
            Psi_vqu_dot = Psi_vq_dot.dot(Psi_Ew)
            Psi_vrp_dot = -A['vq'].dot(q_ref)
            Psi_vrppp_dot = Psi_vq_dot.dot(Psi_q)
            Psi_vrpppp_dot = Psi_vq_dot.dot(Psi_Er)
            Psi_pvr_dot = Psi_vrp
            Psi_F_dot = np.column_stack([
                tools.rotate(self.thruster_vector[i] * self.thrust_max, q_ref)
                for i in range(self.M)
            ])
            Psi_prF_dot = Psi_F
            Psi_pvq_dot = Psi_vq
            Psi_pvrppp_dot = Psi_vrppp
            Psi_pw2_dot = Psi_vqw
            Psi_pEr2_dot = Psi_vrpppp
            Psi_pvqu_dot = Psi_vqu
            # Create the conglomerated state time derivative
            dzdt = np.empty(self.dltv_z0.shape)
            dzdt[self.dltv_map['Phi_w']['idx']] = Phi_w_dot.flatten()
            dzdt[self.dltv_map['Psi_r']['idx']] = Psi_r_dot.flatten()
            dzdt[self.dltv_map['Psi_u']['idx']] = Psi_u_dot.flatten()
            dzdt[self.dltv_map['Phi_q']['idx']] = Phi_q_dot.flatten()
            dzdt[self.dltv_map['Psi_E']['idx']] = Psi_E_dot.flatten()
            dzdt[self.dltv_map['Psi_q']['idx']] = Psi_q_dot.flatten()
            dzdt[self.dltv_map['Psi_Er']['idx']] = Psi_Er_dot.flatten()
            dzdt[self.dltv_map['Psi_Ew']['idx']] = Psi_Ew_dot.flatten()
            dzdt[self.dltv_map['Psi_vq']['idx']] = Psi_vq_dot.flatten()
            dzdt[self.dltv_map['Psi_vqw']['idx']] = Psi_vqw_dot.flatten()
            dzdt[self.dltv_map['Psi_vqu']['idx']] = Psi_vqu_dot.flatten()
            dzdt[self.dltv_map['Psi_vrp']['idx']] = Psi_vrp_dot.flatten()
            dzdt[self.dltv_map['Psi_vrppp']['idx']] = Psi_vrppp_dot.flatten()
            dzdt[self.dltv_map['Psi_vrpppp']['idx']] = Psi_vrpppp_dot.flatten()
            dzdt[self.dltv_map['Psi_pvr']['idx']] = Psi_pvr_dot.flatten()
            dzdt[self.dltv_map['Psi_F']['idx']] = Psi_F_dot.flatten()
            dzdt[self.dltv_map['Psi_prF']['idx']] = Psi_prF_dot.flatten()
            dzdt[self.dltv_map['Psi_pvq']['idx']] = Psi_pvq_dot.flatten()
            dzdt[self.dltv_map['Psi_pvrppp']['idx']] = Psi_pvrppp_dot.flatten()
            dzdt[self.dltv_map['Psi_pw2']['idx']] = Psi_pw2_dot.flatten()
            dzdt[self.dltv_map['Psi_pEr2']['idx']] = Psi_pEr2_dot.flatten()
            dzdt[self.dltv_map['Psi_pvqu']['idx']] = Psi_pvqu_dot.flatten()
            return dzdt

        # Prepare storage variables
        N = len(t_star) - 1
        Ad = {k: None for k in range(N)}
        Bd = {k: None for k in range(N)}
        rd = {k: None for k in range(N)}

        # Propagate the input through the non-linear dynamics to generate a
        # reference trajectory
        # **trick**: re-set integration to x_star[k] at every interval
        t_nl, x_nl, x_nl_ct, mask_nl = [], [], [], []
        for k in range(N):
            _t, _x, _mask = self.sim(x0=x_star[k],
                                     u=np.array([u_star[k]]),
                                     t_f=t_star[k + 1] - t_star[k],
                                     normalize_q=False,
                                     **odeopts)
            t_nl.append(_t)
            x_nl.append(_x)
            x_nl_ct.append(
                siplt.interp1d(_t,
                               _x,
                               axis=0,
                               assume_sorted=True,
                               bounds_error=False,
                               fill_value=(_x[0], _x[-1])))
            mask_nl.append(_mask)

        # Compute discrete-time update for each interval
        for k in range(N):
            # Integrate the dynamic components
            pulse = u_star[k]
            z = tools.odeint(lambda t, z: dzdt(z, t, x_nl_ct[k](t), pulse),
                             x0=self.dltv_z0.copy(),
                             t=t_nl[k],
                             **odeopts)
            # Extract the dynamic components' values at t_star[k+1]
            j = [mask_nl[k][i] for i in range(self.M)]
            Phi_w_T = self.dltv_zget(z[-1], 'Phi_w')
            Phi_w = [self.dltv_zget(z[j[i]], 'Phi_w') for i in range(self.M)]
            Phi_w_inv = [la.inv(Phi_w[i]) for i in range(self.M)]
            Psi_r = self.dltv_zget(z[-1], 'Psi_r')
            Psi_u = [self.dltv_zget(z[j[i]], 'Psi_u') for i in range(self.M)]
            Phi_q_T = self.dltv_zget(z[-1], 'Phi_q')
            Phi_q = [self.dltv_zget(z[j[i]], 'Phi_q') for i in range(self.M)]
            Psi_E_T = self.dltv_zget(z[-1], 'Psi_E')
            Psi_E = [self.dltv_zget(z[j[i]], 'Psi_E') for i in range(self.M)]
            Psi_Ew = [self.dltv_zget(z[j[i]], 'Psi_Ew') for i in range(self.M)]
            Psi_q_T = self.dltv_zget(z[-1], 'Psi_q')
            Psi_Er_T = self.dltv_zget(z[-1], 'Psi_Er')
            Psi_vq_T = self.dltv_zget(z[-1], 'Psi_vq')
            Psi_vq = [self.dltv_zget(z[j[i]], 'Psi_vq') for i in range(self.M)]
            Psi_vqw = [
                self.dltv_zget(z[j[i]], 'Psi_vqw') for i in range(self.M)
            ]
            Psi_vqw_T = self.dltv_zget(z[-1], 'Psi_vqw')
            Psi_vqu = [
                self.dltv_zget(z[j[i]], 'Psi_vqu') for i in range(self.M)
            ]
            Psi_vrp_T = self.dltv_zget(z[-1], 'Psi_vrp')
            Psi_vrppp_T = self.dltv_zget(z[-1], 'Psi_vrppp')
            Psi_vrpppp_T = self.dltv_zget(z[-1], 'Psi_vrpppp')
            Psi_pvr_T = self.dltv_zget(z[-1], 'Psi_pvr')
            Psi_F = [
                self.dltv_zget(z[j[i]], 'Psi_F')[:, i] for i in range(self.M)
            ]
            Psi_prF = [
                self.dltv_zget(z[j[i]], 'Psi_prF')[:, i] for i in range(self.M)
            ]
            Psi_pvq_T = self.dltv_zget(z[-1], 'Psi_pvq')
            Psi_pvq = [
                self.dltv_zget(z[j[i]], 'Psi_pvq') for i in range(self.M)
            ]
            Psi_pvrppp_T = self.dltv_zget(z[-1], 'Psi_pvrppp')
            Psi_pw2_T = self.dltv_zget(z[-1], 'Psi_pw2')
            Psi_pw2 = [
                self.dltv_zget(z[j[i]], 'Psi_pw2') for i in range(self.M)
            ]
            Psi_pvqu = [
                self.dltv_zget(z[j[i]], 'Psi_pvqu') for i in range(self.M)
            ]
            Psi_pEr2_T = self.dltv_zget(z[-1], 'Psi_pEr2')
            # Compute the discrete-time update components
            thrust_vec_inertial = [
                tools.rotate(self.thruster_vector[i] * self.thrust_max,
                             x_nl[k][mask_nl[k][i]][6:10])
                for i in range(self.M)
            ]
            A_ww = Phi_w_T
            B_w = A_ww.dot(
                np.column_stack([
                    Phi_w_inv[i].dot(self.Jinv_rxF[i]) for i in range(self.M)
                ]))
            r_w = (
                # r_w_p
                A_ww.dot(Psi_r) +
                #r_w_pp
                A_ww.dot(
                    sum([(Psi_u[i] - Phi_w_inv[i] * pulse[i]).dot(
                        self.Jinv_rxF[i]) for i in range(self.M)])))
            A_qq = Phi_q_T
            A_qw = A_qq.dot(Psi_E_T)
            B_q = A_qq.dot(
                np.column_stack([(Psi_E_T - Psi_E[i]).dot(Phi_w_inv[i]).dot(
                    self.Jinv_rxF[i]) for i in range(self.M)]))
            r_q = (
                # r_q_p
                A_qq.dot(Psi_q_T) +
                # r_q_pp
                A_qq.dot(Psi_Er_T) +
                # r_q_ppp
                A_qq.dot(
                    sum([(Psi_Ew[i] +
                          (Psi_E_T - Psi_E[i]).dot(Psi_u[i] - Phi_w_inv[i] *
                                                   pulse[i])).dot(
                                                       self.Jinv_rxF[i])
                         for i in range(self.M)])))
            A_vq = Psi_vq_T
            A_vw = Psi_vqw_T
            B_v = (
                # B_v_p
                self.minv * np.column_stack(
                    [thrust_vec_inertial[i] for i in range(self.M)]) +
                # B_v_pp
                np.column_stack([(Psi_vqw_T - Psi_vqw[i] -
                                  (Psi_vq_T - Psi_vq[i]).dot(Psi_E[i])).dot(
                                      Phi_w_inv[i]).dot(self.Jinv_rxF[i])
                                 for i in range(self.M)]))
            r_v = (
                # r_v_p
                Psi_vrp_T +
                # r_v_pp
                self.minv * (sum(Psi_F) - sum(
                    [thrust_vec_inertial[i] * pulse[i]
                     for i in range(self.M)])) +
                # r_v_ppp
                Psi_vrppp_T +
                # r_v_pppp
                Psi_vrpppp_T +
                # r_v_ppppp
                sum([(Psi_vqu[i] +
                      (Psi_vq_T - Psi_vq[i]).dot(Psi_Ew[i] -
                                                 Psi_E[i].dot(Psi_u[i])) +
                      (Psi_vqw_T - Psi_vqw[i]).dot(Psi_u[i]) -
                      (Psi_vqw_T - Psi_vqw[i] -
                       (Psi_vq_T - Psi_vq[i]).dot(Psi_E[i])).dot(
                           Phi_w_inv[i]) * pulse[i]).dot(self.Jinv_rxF[i])
                     for i in range(self.M)]))
            T = t_star[k + 1] - t_star[k]
            A_pv = np.eye(3) * T
            A_pq = Psi_pvq_T
            A_pw = Psi_pw2_T
            B_p = (
                # B_p_p
                self.minv *
                np.column_stack([(T - pulse[i]) * thrust_vec_inertial[i]
                                 for i in range(self.M)]) +
                # B_p_pp
                np.column_stack(
                    [(Psi_pw2_T - Psi_pw2[i] - (T - pulse[i]) * Psi_vqw[i] -
                      (Psi_pvq_T - Psi_pvq[i] -
                       (T - pulse[i]) * Psi_vq[i]).dot(Psi_E[i])).dot(
                           Phi_w_inv[i]).dot(self.Jinv_rxF[i])
                     for i in range(self.M)]))
            r_p = (
                # r_p_p
                Psi_pvr_T +
                # r_p_pp
                self.minv * sum([
                    Psi_prF[i] + (T - pulse[i]) *
                    (Psi_F[i] - thrust_vec_inertial[i] * pulse[i])
                    for i in range(self.M)
                ]) +
                # r_p_ppp
                Psi_pvrppp_T +
                # r_p_pppp
                Psi_pEr2_T +
                # r_p_ppppp
                sum([(Psi_pvqu[i] + (T - pulse[i]) * Psi_vqu[i] +
                      (Psi_pvq_T - Psi_pvq[i] -
                       (T - pulse[i]) * Psi_vq[i]).dot(Psi_Ew[i]) +
                      (Psi_pw2_T - Psi_pw2[i] - (T - pulse[i]) * Psi_vqw[i] -
                       (Psi_pvq_T - Psi_pvq[i] -
                        (T - pulse[i]) * Psi_vq[i]).dot(Psi_E[i])
                       ).dot(Psi_u[i] - Phi_w_inv[i] * pulse[i])).dot(
                           self.Jinv_rxF[i]) for i in range(self.M)]))
            # Concatenate
            Ad[k] = np.block([[np.eye(3), A_pv, A_pq, A_pw],
                              [np.zeros((3, 3)),
                               np.eye(3), A_vq, A_vw],
                              [np.zeros((4, 6)), A_qq, A_qw],
                              [np.zeros((3, 10)), A_ww]])
            Bd[k] = np.row_stack([B_p, B_v, B_q, B_w])
            rd[k] = np.concatenate([r_p, r_v, r_q, r_w])

        return Ad, Bd, rd, x_nl
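The returned Ad, Bd and rd define an affine discrete-time update along the reference trajectory. A hedged sketch of one plausible way to consume them (a forward prediction of the 13-element [p, v, q, w] state; csm, t_star, x_star and u_star are assumed to exist as in the docstring):

Ad, Bd, rd, x_nl = csm.dltv(t_star, x_star, u_star)
x_pred = [x_star[0]]
for k in range(len(t_star) - 1):
    # affine update: x[k+1] ~ A[k] x[k] + B[k] u[k] + r[k]
    x_pred.append(Ad[k].dot(x_pred[k]) + Bd[k].dot(u_star[k]) + rd[k])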
Example #11
    def set_parameters(self):
        # Unit converter functions
        def convert_units(parameters, converter):
            """
            Convert the units of parameters using the converter callable.

            Parameters
            ----------
            parameters : {float,np.array,dict}
                The parameters to convert.
            converter : callable
                Function converter(parameter) that takes a parameter (float or
                np.array) in its original units and returns it in the new
                units.

            Returns
            -------
            new_parameters : {float,np.array,dict}
                parameters in the new units.
            """
            if type(parameters) is dict:
                keys = parameters.keys()
                new_parameters = {k: None for k in keys}
                for key in keys:
                    new_parameters[key] = converter(parameters[key])
            else:
                new_parameters = converter(parameters)
            return new_parameters

        in2m = lambda x: x * 0.0254  # inches to meters
        ft2slug2m2kg = lambda x: x * 1.35581795  # slug*ft^2 to kg*m^2
        lb2kg = lambda x: x * 0.453592  # lb to kg
        lbf2N = lambda x: x * 4.448222  # lbf to N

        # Rotation primitives (angles to be given in **degrees**)
        cd = lambda angle: np.cos(np.deg2rad(angle))
        sd = lambda angle: np.sin(np.deg2rad(angle))
        R_x = lambda a: np.array([[1., 0., 0.], [0., cd(a), -sd(a)],
                                  [0., sd(a), cd(a)]])
        R_z = lambda a: np.array([[cd(a), -sd(a), 0.], [sd(
            a), cd(a), 0.], [0., 0., 1.]])

        # Total mass
        self.m = convert_units(66850.6, lb2kg)
        self.minv = 1. / self.m

        # Inertia tensor in dynamical coordinates (i.e. body frame - same as
        # Apollo coordinates, but origin at center of mass)
        J_xx, J_yy, J_zz = 36324., 80036., 81701.
        J_xy, J_xz, J_yz = -2111., 273., 2268.
        self.J = np.array([[J_xx, -J_xy, -J_xz], [-J_xy, J_yy, -J_yz],
                           [-J_xz, -J_yz, J_zz]])
        self.J = convert_units(self.J, ft2slug2m2kg)
        self.Jinv = la.inv(self.J)

        # Maximum thrust of each thruster
        self.thrust_max = convert_units(100., lbf2N)

        # Minimum and maximum thrust pulse duration [s]
        self.t_pulse_min = 100e-3
        self.t_pulse_max = 500e-3

        ## CSM geometry below
        # {A} : CSM coordinate frame (centered behind CG)
        # {C} : CSM dynamical coordinate frame (centered at CG)

        # Thruster chamber root positions in quad frame
        Q_r_Q = dict(p_f=np.array([6.75, 0., 0.]),
                     p_a=np.array([-6.75, 0., 0.]),
                     r_f=np.array([0.94, 0., 3.125]),
                     r_a=np.array([-0.94, 0., -3.125]))
        Q_r_Q = convert_units(Q_r_Q, in2m)

        # Thruster vectors in quad frame
        e_x = np.array([1., 0., 0.])
        e_z = np.array([0., 0., 1.])
        self.thruster_cant_angle = 10.
        Q_ = dict(p_f=R_z(self.thruster_cant_angle).dot(e_x),
                  p_a=R_z(-self.thruster_cant_angle).dot(-e_x),
                  r_f=R_x(self.thruster_cant_angle).dot(e_z),
                  r_a=R_x(-self.thruster_cant_angle).dot(-e_z))

        # Quad positions in RCS frame
        R_r_RQ = dict(A=np.array([958.97, 0., -83.56]),
                      B=np.array([958.97, 83.56, 0.]),
                      C=np.array([958.97, 0., 83.56]),
                      D=np.array([958.97, -83.56, 0.]))
        R_r_RQ = convert_units(R_r_RQ, in2m)

        # Center of mass position of CSM in Apollo frame
        A_r_AM = convert_units(np.array([933.9, 5.0, 4.7]), in2m)

        # RCS frame orientation wrt Apollo frame
        R_AR = R_x(-(7. + 15. / 60.))

        # Quad frame orientations wrt RCS frame
        R_RQ = dict(A=R_x(-90.), B=R_x(0.), C=R_x(90.), D=R_x(180.))

        # Position of thrust chamber root and thrust vector of each thruster wrt
        # CG, in dynamical coordinates
        self.M = 4 * 4  # 4 thrusters per quad, 4 quads on SM
        self.thruster_position = np.empty((self.M, 3))  # thruster positions
        self.thruster_vector = np.empty((self.M, 3))  # thruster vectors
        self.i2thruster = {i: None for i in range(self.M)}
        self.thruster2i = dict()
        i = 0
        for quad in ['A', 'B', 'C', 'D']:
            for thruster in ['p_f', 'p_a', 'r_f', 'r_a']:
                # r = A_r_M(thruster # of quad #)
                self.thruster_position[i] = (
                    R_AR.dot(R_r_RQ[quad] + R_RQ[quad].dot(Q_r_Q[thruster])) -
                    A_r_AM)
                self.thruster_vector[i] = -R_AR.dot(R_RQ[quad]).dot(
                    Q_[thruster])
                self.i2thruster[i] = (quad, thruster)
                self.thruster2i[quad + ' ' + thruster] = i
                i += 1
        # precompute J^{-1}(r_i\cross F_i) terms that will be useful for the
        # dynamics
        self.Jinv_rxF = [
            self.Jinv.dot(
                np.cross(self.thruster_position[i],
                         self.thruster_vector[i] * self.thrust_max))
            for i in range(self.M)
        ]

        # LM interface position in Apollo frame
        A_r_AI = convert_units(np.array([1110.25, 0., 0.]), in2m)

        # LM interface position of CSM, wrt CG, in CSM dynamical coordinates
        C_r_CI = A_r_AI - A_r_AM
        self.r_dock_csm = C_r_CI

        ## LM geometry below
        # {E} : LM coordinate frame (centered behind CG)
        # {L} : LM dynamical coordinate frame (centered at CG)

        # CSM interface in LM frame
        E_r_EI = convert_units(np.array([312.5, 0., 0.]), in2m)

        # Docking orientation (Apollo frame to LM frame)
        angle_dock = np.deg2rad(60.)
        R_EA = np.array([[-1., 0., 0.],
                         [0., np.cos(angle_dock),
                          np.sin(angle_dock)],
                         [0., np.sin(angle_dock), -np.cos(angle_dock)]])
        q_EA = tools.rot2q(R_EA)
        self.q_dock = q_EA

        # Center of mass position of LM in LM frame
        A_r_AL = convert_units(np.array([1238.2, -0.6, 0.8]), in2m)
        E_r_EL = tools.rotate(A_r_AL - A_r_AM - C_r_CI, q_EA) + E_r_EI

        # CSM interface position of LM, wrt CG, in LM dynamical coordinates
        L_r_LI = E_r_EI - E_r_EL
        self.r_dock_lm = L_r_LI

        ## Other quantities
        # DLTV creation conglomerated state for dynamic quantities
        self.dltv_z0 = []
        self.dltv_map = dict()

        def add_term(initial_value, name):
            """
            Add new term to concatenated state.

            Parameters
            ----------
            initial_value : np.ndarray
                Initial value of the dynamic variable.
            name : str
                Name to use for the dynamic variable.
            """
            idx_start = len(self.dltv_z0)
            self.dltv_z0 += initial_value.flatten().tolist()
            idx_end = len(self.dltv_z0)
            self.dltv_map[name] = dict(idx=range(idx_start, idx_end),
                                       sz=initial_value.shape)

        def get_term(z, name):
            """Extract term from conglomerated state."""
            return z[self.dltv_map[name]['idx']].reshape(
                self.dltv_map[name]['sz'])

        add_term(np.eye(3), 'Phi_w')
        add_term(np.zeros(3), 'Psi_r')
        add_term(np.zeros((3, 3)), 'Psi_u')
        add_term(np.eye(4), 'Phi_q')
        add_term(np.zeros((4, 3)), 'Psi_E')
        add_term(np.zeros(4), 'Psi_q')
        add_term(np.zeros(4), 'Psi_Er')
        add_term(np.zeros((4, 3)), 'Psi_Ew')
        add_term(np.zeros((3, 4)), 'Psi_vq')
        add_term(np.zeros((3, 3)), 'Psi_vqw')
        add_term(np.zeros((3, 3)), 'Psi_vqu')
        add_term(np.zeros(3), 'Psi_vrp')
        add_term(np.zeros(3), 'Psi_vrppp')
        add_term(np.zeros(3), 'Psi_vrpppp')
        add_term(np.zeros(3), 'Psi_pvr')
        add_term(np.zeros((3, 16)), 'Psi_F')
        add_term(np.zeros((3, 16)), 'Psi_prF')
        add_term(np.zeros((3, 4)), 'Psi_pvq')
        add_term(np.zeros(3), 'Psi_pvrppp')
        add_term(np.zeros((3, 3)), 'Psi_pw2')
        add_term(np.zeros(3), 'Psi_pEr2')
        add_term(np.zeros((3, 3)), 'Psi_pvqu')

        self.dltv_z0 = np.array(self.dltv_z0)
        self.dltv_zget = get_term
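add_term and get_term simply pack each dynamic matrix into one flat vector and recover it later from a stored index range and shape. A standalone sketch of the same mechanism with shortened names (not the class attributes themselves):

import numpy as np

z0, zmap = [], {}

def add_term(initial_value, name):
    # remember where this block lives in the flat vector, and its shape
    start = len(z0)
    z0.extend(initial_value.flatten().tolist())
    zmap[name] = dict(idx=range(start, len(z0)), sz=initial_value.shape)

def get_term(z, name):
    return z[zmap[name]['idx']].reshape(zmap[name]['sz'])

add_term(np.eye(3), 'Phi_w')
add_term(np.zeros((3, 3)), 'Psi_u')
z = np.array(z0)
assert np.array_equal(get_term(z, 'Phi_w'), np.eye(3))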
Example #12
def fix_box_angle(target_angle: float, mline_box: List, option: Dict) -> List:
    line_matrix_left = linearLineMatrix(mline_box[0], mline_box[2])
    line_matrix_right = linearLineMatrix(mline_box[1], mline_box[3])
    origin = detectIntersection(line_matrix_left, line_matrix_right)
    angle = target_angle - option['angle']
    return [rotate(origin, point, angle) for point in mline_box]
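fix_box_angle pivots the four box points around the intersection of its two edge lines. A standalone numpy sketch of rotating 2-D points about an arbitrary origin, assuming (as the call suggests) that rotate(origin, point, angle) rotates point about origin by angle degrees:

import numpy as np

def rotate_about(origin, point, angle_deg):
    # rotate a 2-D point about an arbitrary origin by angle_deg degrees
    # (sketch of what rotate(origin, point, angle) presumably does here)
    a = np.deg2rad(angle_deg)
    ox, oy = origin
    px, py = point
    qx = ox + np.cos(a) * (px - ox) - np.sin(a) * (py - oy)
    qy = oy + np.sin(a) * (px - ox) + np.cos(a) * (py - oy)
    return [qx, qy]

box = [[0, 0], [4, 0], [4, 2], [0, 2]]
pivot = [2, 1]
rotated = [rotate_about(pivot, p, 15) for p in box]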
Example #13
    def get_arrival_point(self, start, robot_current, robot_final, final_orientation):
        zone_corners = self.get_corners("landing")
        leg_rest_vector = np.array([0., -1])
        F = - self.fix_position + robot_final - robot_current + tools.rotate((self.fix_position + leg_rest_vector*R_repos),
                                                                             final_orientation)
        if np.dot(leg_rest_vector, (F/norm(F))) >= np.dot(leg_rest_vector, (zone_corners[3]/norm(zone_corners[3]))):
            print("Found direct flight for final position")
            return np.array(F)
        side = None
        for corner in zone_corners:
            if np.dot(corner-np.array(F), tools.rotate(u, [0, 1])) > 0:
                if side is None:
                    side = 1
                elif side == -1:
                    side = 0
            else:
                if side is None:
                    side = -1
                elif side == 1:
                    side = 0
        entities = [['C', [0., 0.], Rmax-Rdiff*self.ratios["landing"]],
                    ['C', [0., 0.], Rmin+Rdiff*self.ratios["landing"]],
                    ['S', zone_corners[0], zone_corners[1]],
                    ['S', zone_corners[2], zone_corners[3]]]
        if side == 0:
            print("Found final line in zone")

            final_line = ['L', F, u]
            inter_kept = []
            for entity in entities:
                inters = tools.intersect(final_line, entity)
                print("{0} intersection(s) between {1} and {2}".format(len(inters), final_line, entity))
                for tmp in inters:
                    print("Possible landing point from {1} : {0}".format(tmp, start))
                    if entity[0] == 'C':
                        print("Cosine is {2} compared to {3}".format(np.dot(leg_rest_vector, (tmp/norm(tmp))), np.dot(leg_rest_vector, (zone_corners[3]/norm(zone_corners[3])))))
                    if entity[0] == 'S' or np.dot(leg_rest_vector, (tmp-1*np.array(zone_center))/norm(np.array(zone_center)-tmp)) >= np.dot(leg_rest_vector, (zone_corners[3]-np.array(zone_center))/norm(np.array(zone_center)-zone_corners[3])):
                        inter_kept += [tmp]
                        print("Kept :")
                        print(zone_center, tmp, -1*np.array(zone_center)+tmp)
            dists = []
            for inter in inter_kept:
                dists += [norm(inter-np.array(F))]
            final_point = inter_kept[dists.index(min(dists))]
            print("Found direct flight for final parallel line")
            print(final_point)
            return final_point
        else:
            print("Computing tmp position before final line")
            print("Computing pure rotating movement")
            C_rot = ['C', np.array(O), norm(np.array(O)-np.array(I))]
            intersections = []
            inter_kept = []
            for entity in entities:
                inters = tools.intersect(C_rot, entity)
                print("{0} intersection(s) between {1} and {2}".format(len(inters), C_rot, entity))
                for tmp in inters:
                    print("Possible landing point : {0} from {1}, cosine is {2} compared to {3}".format(tmp, I, np.dot(leg_rest_vector, (tmp-np.array(zone_center))/norm(np.array(zone_center)-tmp)), np.dot(leg_rest_vector, (zone_corners[3]-np.array(zone_center))/norm(np.array(zone_center)-zone_corners[3]))))
                    if entity[0] == 'S' or np.dot(leg_rest_vector, tmp/norm(tmp)) >= np.dot(leg_rest_vector, zone_corners[3]/norm(zone_corners[3])):
                        inter_kept += [tmp]
                        print("Kept :")
            dists = []
            for inter in inter_kept:
                dists += [norm(inter-np.array(I))]
            v1 = inter_kept[dists.index(max(dists))] - np.array(I)
            print("v1 : {0}".format(v1))

            print("Computing permanent rotating movement")
            S_direct = ['S', np.array(I), np.array(F)]
            intersections = []
            inter_kept = []
            for entity in entities:
                inters = tools.intersect(S_direct, entity)
                print("{0} intersection(s) between {1} and {2}".format(len(inters), S_direct, entity))
                for tmp in inters:
                    print("Possible landing point : {0} from {1}, cosine is {2} compared to {3}".format(tmp, I, np.dot(leg_rest_vector, (tmp-np.array(zone_center))/norm(np.array(zone_center)-tmp)), np.dot(leg_rest_vector, (zone_corners[3]-np.array(zone_center))/norm(np.array(zone_center)-zone_corners[3]))))
                    if entity[0] == 'S' or np.dot(leg_rest_vector, tmp-1/norm(tmp)) >= np.dot(leg_rest_vector, zone_corners[3]/norm(zone_corners[3])):
                        inter_kept += [tmp]
                        print("Kept :")
            dists = []
            for inter in inter_kept:
                dists += [norm(inter-np.array(F))]
            v2 = inter_kept[dists.index(min(dists))] - np.array(I)
            print("v2 : {0}".format(v2))

            print("Computing final point considering v_mean")
            v_mean = v2
            #v_mean = facteur_rotation_mouvement * v2 + (1 - facteur_rotation_mouvement)*v1
            tmp_line = ['L', I, v_mean]
            inter_kept = []
            for entity in entities:
                inters = tools.intersect(tmp_line, entity)
                print("{0} intersection(s) between {1} and {2}".format(len(inters), tmp_line, entity))
                for tmp in inters:
                    print("Possible landing point : {0} from {1}, cosine is {2} compared to {3}".format(tmp, I, np.dot(leg_rest_vector, (tmp-np.array(zone_center))/norm(np.array(zone_center)-tmp)), np.dot(leg_rest_vector, (zone_corners[3]-np.array(zone_center))/norm(np.array(zone_center)-zone_corners[3]))))
                    if entity[0] == 'S' or np.dot(leg_rest_vector, (tmp-1*np.array(zone_center))/norm(np.array(zone_center)-tmp)) >= np.dot(leg_rest_vector, (zone_corners[3]-np.array(zone_center))/norm(np.array(zone_center)-zone_corners[3])):
                        inter_kept += [tmp]
                        print("Kept :")
                        print(zone_center, tmp, -1*np.array(zone_center)+tmp)
            dists = []
            for inter in inter_kept:
                dists += [norm(inter-np.array(F))]
            print("Arrival point from {0} : {1}".format(I, inter_kept[dists.index(min(dists))]))
            ideal_point = inter_kept[dists.index(min(dists))]

            return ideal_point
Example #14
def model_profiles(nber_clus_fit,
                   nber_rand_fit,
                   map_params,
                   l,
                   cl,
                   mass_int,
                   c200c,
                   z,
                   centroid_shift_value=0,
                   cl_extragal=None,
                   bl=None,
                   cl_noise=None,
                   use_magnitude_weights=True,
                   use_noise_weights=False,
                   apply_noise=True):

    nx, dx, ny, dy = map_params
    if cl_noise is None:
        cl_noise = np.zeros(max(l) + 1)

    mass_int = np.copy(mass_int) * 1e14

    cutouts_clus_arr = []
    magnitude_weights_clus_arr = []
    for i in tqdm(range(nber_clus_fit)):
        sim = sims.cmb_mock_data(map_params, l, cl)
        x_shift, y_shift = np.random.normal(
            loc=0.0, scale=centroid_shift_value), np.random.normal(
                loc=0.0, scale=centroid_shift_value)
        centroid_shift = [x_shift, y_shift]
        for j in range(len(mass_int)):
            kappa_map = lensing.NFW(mass_int[j], c200c, z,
                                    1100).convergence_map(
                                        map_params,
                                        centroid_shift=centroid_shift)
            alpha_vec = lensing.deflection_from_convergence(
                map_params, kappa_map)
            sim_lensed = lensing.lens_map(map_params, sim, alpha_vec)
            sim_lensed_noise = np.copy(sim_lensed)
            total_noise_map = tools.make_gaussian_realization(
                map_params, l, cl_noise)
            sim_lensed_noise += total_noise_map
            if bl is not None:
                sim_lensed = tools.convolve(sim_lensed,
                                            l,
                                            np.sqrt(bl),
                                            map_params=map_params)
                sim_lensed_noise = tools.convolve(sim_lensed_noise,
                                                  l,
                                                  np.sqrt(bl),
                                                  map_params=map_params)
            if apply_noise is False:
                sim_lensed_noise = np.copy(sim_lensed)
            cutout = tools.central_cutout(map_params, sim_lensed, 10)
            wiener_filter = tools.wiener_filter(l, cl, cl_noise)
            filtered_map = tools.convolve(sim_lensed_noise, l, wiener_filter,
                                          map_params)
            low_pass_filter = tools.low_pass_filter(l, 2000)
            filtered_map = tools.convolve(filtered_map, l, low_pass_filter,
                                          map_params)
            filtered_cutout = tools.central_cutout(map_params, filtered_map, 6)
            _, _, magnitude, angle = tools.gradient(filtered_cutout, dx)
            angle, magnitude_weight = np.median(angle), np.median(magnitude)
            cutout_aligned = tools.rotate(cutout, angle)
            cutout_aligned -= np.median(cutout_aligned)
            if use_magnitude_weights is False:
                magnitude_weight = 1
            cutouts_clus_arr.append(cutout_aligned * magnitude_weight)
            magnitude_weights_clus_arr.append(magnitude_weight)

    cutouts_rand_arr = []
    magnitude_weights_rand_arr = []
    for i in tqdm(range(nber_rand_fit)):
        sim = sims.cmb_mock_data(map_params, l, cl)
        sim_noise = np.copy(sim)
        total_noise_map = tools.make_gaussian_realization(
            map_params, l, cl_noise)
        sim_noise += total_noise_map
        if bl is not None:
            sim = tools.convolve(sim, l, np.sqrt(bl), map_params=map_params)
            sim_noise = tools.convolve(sim_noise,
                                       l,
                                       np.sqrt(bl),
                                       map_params=map_params)
        if apply_noise is False:
            sim_noise = np.copy(sim)
        cutout = tools.central_cutout(map_params, sim, 10)
        wiener_filter = tools.wiener_filter(l, cl, cl_noise)
        filtered_map = tools.convolve(sim_noise, l, wiener_filter, map_params)
        low_pass_filter = tools.low_pass_filter(l, 2000)
        filtered_map = tools.convolve(filtered_map, l, low_pass_filter,
                                      map_params)
        filtered_cutout = tools.central_cutout(map_params, filtered_map, 6)
        _, _, magnitude, angle = tools.gradient(filtered_cutout, dx)
        angle, magnitude_weight = np.median(angle), np.median(magnitude)
        cutout_aligned = tools.rotate(cutout, angle)
        cutout_aligned -= np.median(cutout_aligned)
        cutouts_rand_arr.append(cutout_aligned)
        magnitude_weights_rand_arr.append(magnitude_weight)
    if use_magnitude_weights is False:
        magnitude_weights_rand_arr = np.ones(nber_rand_fit)
    weighted_cutouts = [
        cutouts_rand_arr[i] * magnitude_weights_rand_arr[i]
        for i in range(nber_rand_fit)
    ]
    stack_bg = np.sum(weighted_cutouts,
                      axis=0) / np.sum(magnitude_weights_rand_arr)

    profile_models_arr = []
    for i in tqdm(range(len(mass_int))):
        stack_clus = np.sum(cutouts_clus_arr[i::len(mass_int)],
                            axis=0) / np.sum(
                                magnitude_weights_clus_arr[i::len(mass_int)])
        stack_dipole = stack_clus - stack_bg
        profile_model = np.mean(stack_dipole, axis=0)
        profile_models_arr.append(profile_model)

    return profile_models_arr
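
A hypothetical follow-up sketch (not part of the original source) showing how the returned profile models could be compared against a measured dipole profile to pick a best-fit mass by least squares; best_fit_mass and measured_profile are illustrative names only:

import numpy as np

def best_fit_mass(mass_int, profile_models_arr, measured_profile):
    # sum of squared residuals between each trial-mass model and the measurement
    residuals = [np.sum((model - measured_profile) ** 2)
                 for model in profile_models_arr]
    return mass_int[int(np.argmin(residuals))]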
Example #15
0
def compile_lrtop(data):
    """
    Create the local rendezvous trajectory optimization problem (LRTOP).

    Parameters
    ----------
    data : dict
        Problem data.
    """
    csm = data['csm']
    N = data['N']
    p_traj = data['state_traj_init'][:, :3]
    v_traj = data['state_traj_init'][:, 3:6]
    w_traj = data['state_traj_init'][:, 10:13]

    state_init = np.concatenate(
        [data['x0']['p'], data['x0']['v'], data['x0']['q'], data['x0']['w']])
    state_final = np.concatenate(
        [data['xf']['p'], data['xf']['v'], data['xf']['q'], data['xf']['w']])

    # Compute scaling terms
    # state (=P_x*state_scaled+p_x)
    p_center = np.mean(p_traj, axis=0)
    v_center = np.mean(v_traj, axis=0)
    q_center = np.zeros(4)
    w_center = np.mean(w_traj, axis=0)
    p_box = tools.bounding_box(p_traj - p_center)
    v_box = tools.bounding_box(v_traj - v_center)
    q_box = np.ones(4)
    w_box = tools.bounding_box(w_traj - w_center)
    p_x = np.concatenate([p_center, v_center, q_center, w_center])
    P_x = np.diag(np.concatenate([p_box, v_box, q_box, w_box]))
    P_x_inv = la.inv(P_x)
    data['scale_x'] = lambda x: P_x_inv.dot(x - p_x)
    # virtual control (=P_v*vc_scaled)
    p_box = tools.bounding_box(p_traj)
    v_box = tools.bounding_box(v_traj)
    q_box = np.ones(4)
    w_box = tools.bounding_box(w_traj)
    P_v = np.diag(np.concatenate([p_box, v_box, q_box, w_box]))
    # input (=P_u*input_scaled+p_u)
    p_u = np.array([0.5 * data['t_pulse_max'] for i in range(csm.M)])
    P_u = np.diag(p_u)
    # fuel (impulse) cost (=P_xi*xi_scaled+p_xi)
    # Heuristic: compute as having a quarter of the thrusters active at minimum
    # pulse width, the whole time
    mean_active_thruster_count = 0.25 * csm.M
    mean_pulse_width = csm.t_pulse_min
    p_xi = 0.
    P_xi = mean_active_thruster_count * mean_pulse_width * N

    # General quantities
    data['t_grid'] = np.linspace(0., data['t_f'],
                                 N + 1)  # discretization time grid
    n_x = data['state_traj_init'].shape[1]
    n_u = csm.M
    e = -tools.rotate(np.array([1., 0., 0.]),
                      csm.q_dock)  # dock axis in LM frame
    I_e = tools.rotate(e, data['lm']['q'])  # dock axis in inertial frame
    data['dock_axis'] = I_e

    # Optimization variables
    # non-dimensionalized
    x_hat = [cvx.Variable(n_x) for k in range(N + 1)]
    v_hat = [cvx.Variable(n_x) for k in range(N + 1)]
    xi_hat = cvx.Variable(N + 1)
    u_hat = [cvx.Variable(n_u) for k in range(N)]
    eta_hat = cvx.Variable(N)  # quadratic trust region size
    # dimensionalized (physical units)
    x = [P_x * x_hat[k] + p_x for k in range(N + 1)]  # unscaled state
    xi = P_xi * xi_hat + p_xi
    u = [P_u * u_hat[k] + p_u for k in range(N)]  # unscaled control
    v = [P_v * v_hat[k] for k in range(N)]  # virtual control
    data['lrtop_var'] = dict(x=x, u=u, xi=xi, v=v)

    # Optimization parameters
    A = [cvx.Parameter((n_x, n_x)) for k in range(N)]
    B = [cvx.Parameter((n_x, n_u)) for k in range(N)]
    r = [cvx.Parameter(n_x) for k in range(N)]
    u_lb = [cvx.Parameter(n_u, value=np.zeros(n_u)) for k in range(N)]
    stc_lb = [cvx.Parameter(n_u) for k in range(N)]
    stc_q = cvx.Parameter(N + 1)
    x_prev_hat = [cvx.Parameter(n_x) for k in range(N + 1)]
    data['lrtop_par'] = dict(A=A,
                             B=B,
                             r=r,
                             u_lb=u_lb,
                             stc_lb=stc_lb,
                             stc_q=stc_q,
                             x_prev_hat=x_prev_hat)

    # Cost
    # minimum-impulse
    J = xi_hat[-1]
    # trust region penalty
    J_tr = data['w_tr'] * sum(eta_hat)
    # virtual control penalty
    J_vc = data['w_vc'] * sum([cvx.norm1(v_hat[k]) for k in range(N)])
    data['lrtop_cost'] = dict(J=J, J_tr=J_tr, J_vc=J_vc)
    cost = cvx.Minimize(J + J_tr + J_vc)

    # constraints
    constraints = []
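    # linearized discrete-time dynamics with virtual-control relaxation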
    constraints += [
        x[k + 1] == A[k] * x[k] + B[k] * u[k] + r[k] + v[k] for k in range(N)
    ]
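    # running sum of pulse widths (total-impulse term minimized in the cost)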
    constraints += [xi[k + 1] == xi[k] + sum(u[k]) for k in range(N)]
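    # boundary conditions on the state and the accumulated impulse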
    constraints += [x[0] == state_init, x[-1] == state_final, xi[0] == 0]
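    # approach cone: stay within half-angle gamma of the dock axis, seen from the LM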
    constraints += [(x[k][:3] - data['lm']['p']).T * I_e >=
                    cvx.norm(x[k][:3] - data['lm']['p']) *
                    np.cos(np.deg2rad(data['gamma'])) for k in range(N + 1)]
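    # pulse-width bounds (the lower bound u_lb is a parameter, zero by default)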
    constraints += [u[k] <= data['t_pulse_max'] for k in range(N)]
    constraints += [u[k] >= u_lb[k] for k in range(N)]
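    # thrusters listed in data['thrusters_off'] are never fired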
    constraints += [
        u[k][i] == 0. for k in range(N) for i in range(n_u)
        if i in data['thrusters_off']
    ]
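    # quadratic trust region about the previous iterate (in scaled coordinates)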
    constraints += [
        cvx.quad_form(x_hat[k + 1] - x_prev_hat[k + 1], np.eye(n_x)) <=
        eta_hat[k] for k in range(N)
    ]
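    # state-triggered constraint: a nonzero stc_lb entry forces the pulse to zero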
    constraints += [
        stc_lb[k][i] * u[k][i] == 0 for k in range(N) for i in range(n_u)
    ]
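    # state-triggered constraint on the forward-firing ('p_f') thrusters via stc_q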
    constraints += [
        stc_q[k] * u[k][i] == 0 for k in range(N) for i in range(n_u)
        if 'p_f' in csm.i2thruster[i]
    ]
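    # attitude leg of the stc_q trigger: relates the quaternion error from the
    # docking attitude to the half-angle ang_app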
    constraints += [
        stc_q[k + 1] *
        (tools.rqpmat(data['xf']['q'])[0].dot(np.diag([1, -1, -1, -1])) *
         x[k][6:10] - np.cos(0.5 * np.deg2rad(data['ang_app']))) <= 0
        for k in range(N)
    ]

    data['lrtop'] = cvx.Problem(cost, constraints)
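
A minimal usage sketch (not from the original source) of one successive-convexification iteration with the compiled problem; A_k, B_k, r_k, x_prev, stc_lb_val and stc_q_val are hypothetical placeholders for quantities computed elsewhere in the solver loop:

# hypothetical usage; the placeholder arrays above are assumed to exist
for k in range(data['N']):
    data['lrtop_par']['A'][k].value = A_k[k]
    data['lrtop_par']['B'][k].value = B_k[k]
    data['lrtop_par']['r'][k].value = r_k[k]
    data['lrtop_par']['stc_lb'][k].value = stc_lb_val[k]
    data['lrtop_par']['x_prev_hat'][k].value = data['scale_x'](x_prev[k])
data['lrtop_par']['x_prev_hat'][-1].value = data['scale_x'](x_prev[-1])
data['lrtop_par']['stc_q'].value = stc_q_val
data['lrtop'].solve(solver=cvx.ECOS)
x_opt = [data['lrtop_var']['x'][k].value for k in range(data['N'] + 1)]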
Example #16
0
def cmb_test_data(map_params,
                  l,
                  cl,
                  cluster=None,
                  centroid_shift_value=0,
                  cluster_corr_cutouts=None,
                  bl=None,
                  nl=None,
                  nber_obs=1,
                  estimator_validation=False,
                  noise_comparison=False,
                  clus_positions=False,
                  foreground_bias=False):
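    """
    Generate mock CMB test data for one of four scenarios: estimator
    validation over a list of cluster masses, noise-level comparison,
    cluster centroid-shift tests, or tSZ/kSZ foreground-bias tests.
    """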

    if estimator_validation is True:
        sims_clus_arr = []
        kappa_maps = [
            lensing.NFW(cluster[i][0], cluster[i][1], cluster[i][2],
                        1100).convergence_map(map_params)
            for i in range(len(cluster))
        ]
        alpha_vecs = [
            lensing.deflection_from_convergence(map_params, kappa_maps[i])
            for i in range(len(cluster))
        ]
        for i in range(nber_obs):
            sim = tools.make_gaussian_realization(map_params, l, cl)
            sims_lensed = [
                lensing.lens_map(map_params, sim, alpha_vecs[j])
                for j in range(len(cluster))
            ]
            if bl is not None:
                for j in range(len(sims_lensed)):
                    sims_lensed[j] = tools.convolve(sims_lensed[j],
                                                    l,
                                                    np.sqrt(bl),
                                                    map_params=map_params)
            if nl is not None:
                noise_map = tools.make_gaussian_realization(map_params, l, nl)
                for j in range(len(sims_lensed)):
                    sims_lensed[j] += noise_map
            sims_clus_arr.append(sims_lensed)
        sims_mass_sorted = []
        for i in range(len(cluster)):
            maps_at_mass_i = []
            for j in range(nber_obs):
                maps_at_mass_i.append(sims_clus_arr[j][i])
            sims_mass_sorted.append(maps_at_mass_i)
        return sims_mass_sorted

    if noise_comparison is True:
        sims_noise_arr = []
        kappa_map = lensing.NFW(cluster[0], cluster[1], cluster[2],
                                1100).convergence_map(map_params)
        alpha_vec = lensing.deflection_from_convergence(map_params, kappa_map)
        for i in range(nber_obs):
            sim = tools.make_gaussian_realization(map_params, l, cl)
            sim_lensed = lensing.lens_map(map_params, sim, alpha_vec)
            if bl is not None:
                sim_lensed = tools.convolve(sim_lensed,
                                            l,
                                            np.sqrt(bl),
                                            map_params=map_params)
            sims_noise = [np.copy(sim_lensed) for j in range(len(nl))]
            noise_maps = [
                tools.make_gaussian_realization(map_params, l, nl[j])
                for j in range(len(nl))
            ]
            for j in range(len(sims_noise)):
                sims_noise[j] += noise_maps[j]
            sims_noise_arr.append(sims_noise)
        sims_noise_sorted = []
        for i in range(len(nl)):
            maps_at_noise_i = []
            for j in range(nber_obs):
                maps_at_noise_i.append(sims_noise_arr[j][i])
            sims_noise_sorted.append(maps_at_noise_i)
        return sims_noise_sorted

    if clus_positions is True:
        sims_clus_baseline, sims_clus_centroid_shift = [], []
        kappa_map_baseline = lensing.NFW(cluster[0], cluster[1], cluster[2],
                                         1100).convergence_map(map_params)
        alpha_vec_baseline = lensing.deflection_from_convergence(
            map_params, kappa_map_baseline)
        for i in range(nber_obs):
            x_shift = np.random.normal(loc=0.0, scale=centroid_shift_value)
            y_shift = np.random.normal(loc=0.0, scale=centroid_shift_value)
            centroid_shift = [x_shift, y_shift]
            kappa_map_centroid_shift = lensing.NFW(
                cluster[0], cluster[1], cluster[2],
                1100).convergence_map(map_params, centroid_shift)
            alpha_vec_centroid_shift = lensing.deflection_from_convergence(
                map_params, kappa_map_centroid_shift)
            sim = tools.make_gaussian_realization(map_params, l, cl)
            sim_lensed_baseline = lensing.lens_map(map_params, sim,
                                                   alpha_vec_baseline)
            sim_lensed_centroid_shift = lensing.lens_map(
                map_params, sim, alpha_vec_centroid_shift)
            if bl is not None:
                sim_lensed_baseline = tools.convolve(sim_lensed_baseline,
                                                     l,
                                                     np.sqrt(bl),
                                                     map_params=map_params)
                sim_lensed_centroid_shift = tools.convolve(
                    sim_lensed_centroid_shift,
                    l,
                    np.sqrt(bl),
                    map_params=map_params)
            if nl is not None:
                noise_map = tools.make_gaussian_realization(map_params, l, nl)
                sim_lensed_baseline += noise_map
                sim_lensed_centroid_shift += noise_map
            sims_clus_baseline.append(sim_lensed_baseline)
            sims_clus_centroid_shift.append(sim_lensed_centroid_shift)
        return sims_clus_baseline, sims_clus_centroid_shift

    if foreground_bias is True:
        fname = '/Volumes/Extreme_SSD/codes/master_thesis/code/data/mdpl2_cutouts_for_tszksz_clus_detection_M1.7e+14to2.3e+14_z0.6to0.8_15320haloes_boxsize10.0am_dx0.5am.npz'
        cutouts_dic = np.load(fname, allow_pickle=1,
                              encoding='latin1')['arr_0'].item()
        mass_z_key = list(cutouts_dic.keys())[0]
        cutouts = cutouts_dic[mass_z_key]
        scale_fac = fg.compton_y_to_delta_Tcmb(150, uK=True)
        tsz_cutouts, ksz_cutouts = [], []
        for kcntr, keyname in enumerate(cutouts):
            tsz_cutout = cutouts[keyname]['y'] * scale_fac
            tsz_cutouts.append(tsz_cutout)
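            # randrange(-1, 2, 2) yields -1 or +1: flip the kSZ sign at random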
            ksz_cutout = cutouts[keyname]['ksz'] * random.randrange(-1, 2, 2)
            ksz_cutouts.append(ksz_cutout)
        nx, dx, ny, dy = map_params
        cluster_corr_cutout = ksz_cutouts[0]
        nx_cutout, ny_cutout = cluster_corr_cutout.shape
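        # note: the paste indices below mix nx and ny, so a square map
        # (nx == ny) and square cutouts are implicitly assumed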
        s, e = int((nx - nx_cutout) / 2), int((ny + ny_cutout) / 2)


        sims_clus_baseline, sims_clus_tsz, sims_clus_ksz, sims_clus_tsz_ksz = [], [], [], []
        kappa_map = lensing.NFW(cluster[0], cluster[1], cluster[2],
                                1100).convergence_map(map_params)
        alpha_vec = lensing.deflection_from_convergence(map_params, kappa_map)
        for i in range(nber_obs):
            sim = tools.make_gaussian_realization(map_params, l, cl)
            sim_lensed = lensing.lens_map(map_params, sim, alpha_vec)
            sim_lensed_baseline = np.copy(sim_lensed)
            sim_lensed_tsz = np.copy(sim_lensed)
            sim_lensed_ksz = np.copy(sim_lensed)
            sim_lensed_tsz_ksz = np.copy(sim_lensed)
            tsz_cutout = tools.rotate(
                tsz_cutouts[random.randint(0,
                                           len(tsz_cutouts) - 1)],
                random.randint(-180, 180))
            ksz_cutout = tools.rotate(
                ksz_cutouts[random.randint(0,
                                           len(ksz_cutouts) - 1)],
                random.randint(-180, 180))
            tsz_ksz_cutout = tsz_cutout + ksz_cutout
            sim_lensed_tsz[s:e, s:e] += tsz_cutout
            sim_lensed_ksz[s:e, s:e] += ksz_cutout
            sim_lensed_tsz_ksz[s:e, s:e] += tsz_ksz_cutout
            if bl is not None:
                sim_lensed_baseline = tools.convolve(sim_lensed_baseline,
                                                     l,
                                                     np.sqrt(bl),
                                                     map_params=map_params)
                sim_lensed_tsz = tools.convolve(sim_lensed_tsz,
                                                l,
                                                np.sqrt(bl),
                                                map_params=map_params)
                sim_lensed_ksz = tools.convolve(sim_lensed_ksz,
                                                l,
                                                np.sqrt(bl),
                                                map_params=map_params)
                sim_lensed_tsz_ksz = tools.convolve(sim_lensed_tsz_ksz,
                                                    l,
                                                    np.sqrt(bl),
                                                    map_params=map_params)
            if nl is not None:
                noise_map = tools.make_gaussian_realization(map_params, l, nl)
                sim_lensed_baseline += noise_map
                sim_lensed_tsz += noise_map
                sim_lensed_ksz += noise_map
                sim_lensed_tsz_ksz += noise_map
            sims_clus_baseline.append(sim_lensed_baseline)
            sims_clus_tsz.append(sim_lensed_tsz)
            sims_clus_ksz.append(sim_lensed_ksz)
            sims_clus_tsz_ksz.append(sim_lensed_tsz_ksz)
        return sims_clus_baseline, sims_clus_tsz, sims_clus_ksz, sims_clus_tsz_ksz