Example #1
 def drone_position_task(self, task):
     if self.calibrated:
         if task.frame == 0 or self.env.done:
             # in_state = np.array([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
             in_state = None
             states, action = self.env.reset(in_state)
             self.network_in = self.aux_dl.dl_input(states, action)
             self.sensor.reset()
             pos = self.env.state[0:5:2]  # x, y, z from the strided state [x, vx, y, vy, z, vz, ...]
             ang = self.env.ang
             self.a = np.zeros(4)
         else:
             action = self.policy.actor(torch.FloatTensor(self.network_in).to(device)).cpu().detach().numpy()
             states, _, done = self.env.step(action)
             time_iter = time.time()
             _, self.velocity_accel, self.pos_accel = self.sensor.accel_int()
             self.quaternion_gyro = self.sensor.gyro_int()
             self.ang_vel = self.sensor.gyro()
             quaternion_vel = deriv_quat(self.ang_vel, self.quaternion_gyro)
             self.pos_gps, self.vel_gps = self.sensor.gps()
             self.quaternion_triad, _ = self.sensor.triad()
             self.time_total_sens.append(time.time() - time_iter)
             
             # SENSOR CONTROL: feed the policy true states (REAL_STATE_CONTROL) or sensor estimates
             pos_vel = np.array([self.pos_accel[0], self.velocity_accel[0],
                                 self.pos_accel[1], self.velocity_accel[1],
                                 self.pos_accel[2], self.velocity_accel[2]])
             if REAL_STATE_CONTROL:
                 self.network_in = self.aux_dl.dl_input(states, [action])
             else:
                 states_sens = [np.concatenate((pos_vel, self.quaternion_gyro, quaternion_vel))]                
                 self.network_in = self.aux_dl.dl_input(states_sens, [action])
             
             error_i = np.concatenate((self.env.state[0:5:2]-self.pos_accel, self.env.state[6:10]-self.quaternion_gyro))  # estimation error: position and attitude vs. true state
             self.error.append(error_i)
             if len(self.error) > 1e4:
                 error_final = np.array(self.error)
                 time_total_img = np.mean(np.array(self.time_total_img))
                 time_total_sens = np.mean(np.array(self.time_total_sens))
                 print('Iteration Time: Img: %.5fms Sens: %.5fms' %(time_total_img*1000, time_total_sens*1000))
                 sys.exit()
             pos = self.env.state[0:5:2]
             ang = self.env.ang
             for i, w_i in enumerate(self.env.w):
                 self.a[i] += (w_i*time_int_step)*180/np.pi/30  # accumulate propeller angle (deg) for the visual model
     
         ang_deg = (ang[2]*180/np.pi, ang[0]*180/np.pi, ang[1]*180/np.pi)  # Euler angles in degrees, reordered for Panda3D setHpr
         pos = (pos[0], pos[1], pos[2] + 5)  # draw the model 5 scene units above the origin
         
         self.quad.setHpr(*ang_deg)
         self.quad.setPos(*pos)
         self.cam.lookAt(self.quad)
         for v in self.env.w:
             if v < 0:
                 print('negative rotor speed')
         self.prop_1.setHpr(self.a[0],0,0)
         self.prop_2.setHpr(self.a[1],0,0)
         self.prop_3.setHpr(self.a[2],0,0)
         self.prop_4.setHpr(self.a[3],0,0)
     return task.cont
 def gyro_int(self):
     # Propagate the attitude quaternion by integrating the gyro's angular velocity.
     w = self.gyro()
     q = self.quaternion_t0
     V_q = deriv_quat(w, q).flatten()
     q = q + V_q * self.quad.t_step  # first-order (Euler) integration step
     self.quaternion_t0 = q / np.linalg.norm(q)  # renormalize to keep a unit quaternion
     return self.quaternion_t0
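
The deriv_quat helper used above returns the quaternion time derivative for a given body angular velocity. A minimal sketch of what it is assumed to compute (scalar-first quaternion convention and standard rigid-body kinematics dq/dt = 0.5 * Omega(w) * q; not necessarily the author's implementation):

import numpy as np

def deriv_quat(w, q):
    # Quaternion kinematics, scalar-first: q = [q0, q1, q2, q3], w = [wx, wy, wz]
    wx, wy, wz = np.asarray(w).flatten()
    omega = np.array([[0.0, -wx, -wy, -wz],
                      [wx,  0.0,  wz, -wy],
                      [wy, -wz,  0.0,  wx],
                      [wz,  wy, -wx,  0.0]])
    return 0.5 * omega @ np.asarray(q).reshape((4, 1))  # (4, 1) column; callers flatten it
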
    def drone_position_task(self, task):
        if task.frame == 0 or self.env.done:
            self.control_error_list = []
            self.estimation_error_list = []
            if self.HOVER:
                in_state = np.array([0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0])
            else:
                in_state = None
            states, action = self.env.reset(in_state)
            self.network_in = self.aux_dl.dl_input(states, action)
            self.sensor.reset()
            pos = self.env.state[0:5:2]
            ang = self.env.ang
            self.a = np.zeros(4)
            self.episode_n += 1
            print(f'Episode Number: {self.episode_n}')
        else:
            progress = self.env.i/self.env.n*100
            print(f'Progress: {progress:.2f}%', end='\r')
            action = self.policy.actor(torch.FloatTensor(self.network_in).to(device)).cpu().detach().numpy()
            states, _, done = self.env.step(action)
            time_iter = time.time()
            _, self.velocity_accel, self.pos_accel = self.sensor.accel_int()
            self.quaternion_gyro = self.sensor.gyro_int()
            self.ang_vel = self.sensor.gyro()
            quaternion_vel = deriv_quat(self.ang_vel, self.quaternion_gyro)
            self.pos_gps, self.vel_gps = self.sensor.gps()
            self.quaternion_triad, _ = self.sensor.triad()
            self.time_total_sens.append(time.time() - time_iter)
            
            # SENSOR CONTROL: feed the policy true states (REAL_CTRL) or sensor estimates
            pos_vel = np.array([self.pos_accel[0], self.velocity_accel[0],
                                self.pos_accel[1], self.velocity_accel[1],
                                self.pos_accel[2], self.velocity_accel[2]])

            if self.REAL_CTRL:
                self.network_in = self.aux_dl.dl_input(states, [action])
            else:
                states_sens = [np.concatenate((pos_vel, self.quaternion_gyro, quaternion_vel))]                
                self.network_in = self.aux_dl.dl_input(states_sens, [action])
            
            pos = self.env.state[0:5:2]
            ang = self.env.ang
            for i, w_i in enumerate(self.env.w):
                self.a[i] += (w_i*time_int_step)*180/np.pi/10
    
        ang_deg = (ang[2]*180/np.pi, ang[0]*180/np.pi, ang[1]*180/np.pi)
        pos = (pos[0], pos[1], pos[2] + 5)
        
        # self.quad_model.setHpr((45, 0, 45))
        # self.quad_model.setPos((5, 5, 6))
        self.quad_model.setPos(*pos)
        self.quad_model.setHpr(*ang_deg)
        for prop, a in zip(self.prop_models, self.a):
            prop.setHpr(a, 0, 0)
        return task.cont
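
For either version of drone_position_task to run once per rendered frame, it has to be registered with Panda3D's task manager; returning task.cont keeps the task scheduled. A minimal registration sketch (assuming the class derives from ShowBase, so self.taskMgr is available):

self.taskMgr.add(self.drone_position_task, 'drone_position_task')
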
    def drone_eq(self, t, x, action):
        """"
        Main differential equation, not used directly by the user, rather used in the step function integrator.
        Dynamics based in: 
            MODELAGEM DINÂMICA E CONTROLE DE UM VEÍCULO AÉREO NÃO TRIPULADO DO TIPO QUADRIRROTOR 
            by ALLAN CARLOS FERREIRA DE OLIVEIRA
            BRASIL, SP-SANTO ANDRÉ, UFABC - 2019
        Incorporates:
            Drag Forces, Gyroscopic Forces
            In indirect mode: Force clipping (preventing motor shutoff and saturates over Thrust to Weight Ratio)
            In direct mode: maps [-1,1] to forces [0,T2WR*G*M/4]
        """
        self.w, f_in, m_action = self.f2F(action)  # rotor speeds, total thrust, control moments

        #BODY INERTIAL VELOCITY
        vel_x = x[1]
        vel_y = x[3]
        vel_z = x[5]

        #QUATERNIONS
        q0 = x[6]
        q1 = x[7]
        q2 = x[8]
        q3 = x[9]

        #BODY ANGULAR VELOCITY
        w_xx = x[10]
        w_yy = x[11]
        w_zz = x[12]

        #QUATERNION NORMALIZATION (JUST IN CASE)
        q = np.array([[q0, q1, q2, q3]]).T
        q = q / np.linalg.norm(q)

        # DRAG FORCES ESTIMATION (BASED ON BODY VELOCITIES)
        self.mat_rot = quat_rot_mat(q)
        v_inertial = np.array([[vel_x, vel_y, vel_z]]).T
        v_body = np.dot(self.mat_rot.T, v_inertial)
        f_drag = -0.5 * RHO * C_D * np.multiply(
            A, np.multiply(abs(v_body), v_body))

        # DRAG MOMENTS ESTIMATION (BASED ON BODY ANGULAR VELOCITIES)

        # Discretize each arm into 10 segments (local linear velocity varies along the body)
        d_xx = np.linspace(0, D, 10)
        d_yy = np.linspace(0, D, 10)
        d_zz = np.linspace(0, D, 10)
        m_x = 0
        m_y = 0
        m_z = 0
        for xx, yy, zz in zip(d_xx, d_yy, d_zz):
            m_x += -RHO * C_D * BEAM_THICKNESS * D / 10 * (abs(xx * w_xx) *
                                                           (xx * w_xx))
            m_y += -RHO * C_D * BEAM_THICKNESS * D / 10 * (abs(yy * w_yy) *
                                                           (yy * w_yy))
            m_z += -2 * RHO * C_D * BEAM_THICKNESS * D / 10 * (abs(zz * w_zz) *
                                                               (zz * w_zz))

        m_drag = np.array([[m_x], [m_y], [m_z]])

        #GYROSCOPIC EFFECT ESTIMATION (BASED ON ELECTRIC MOTOR ANGULAR VELOCITY)
        omega_r = (-self.w[0] + self.w[1] - self.w[2] + self.w[3])[0]

        m_gyro = np.array([[-w_xx * I_R * omega_r], [+w_yy * I_R * omega_r],
                           [0]])

        #BODY FORCES
        self.f_in = np.array([[0, 0, f_in]]).T
        self.f_body = self.f_in + f_drag

        #BODY FORCES ROTATION TO INERTIAL
        self.f_inertial = np.dot(self.mat_rot, self.f_body)

        #INERTIAL ACCELERATIONS
        accel_x = self.f_inertial[0, 0] / M
        accel_y = self.f_inertial[1, 0] / M
        accel_z = self.f_inertial[2, 0] / M - G
        self.accel = np.array([[accel_x, accel_y, accel_z]]).T

        #BODY MOMENTS
        W = np.array([[w_xx], [w_yy], [w_zz]])

        m_in = m_action + m_gyro + m_drag - np.cross(
            W.flatten(),
            np.dot(J, W).flatten()).reshape((3, 1))

        #BODY ANGULAR ACCELERATION
        accel_ang = np.dot(self.inv_j, m_in).flatten()
        accel_w_xx = accel_ang[0]
        accel_w_yy = accel_ang[1]
        accel_w_zz = accel_ang[2]

        #QUATERNION TIME DERIVATIVE

        self.V_q = deriv_quat(W, q).flatten()
        dq0 = self.V_q[0]
        dq1 = self.V_q[1]
        dq2 = self.V_q[2]
        dq3 = self.V_q[3]

        # RESULTS ORDER:
        # 0 x, 1 vx, 2 y, 3 vy, 4 z, 5 vz, 6 q0, 7 q1, 8 q2, 9 q3, 10 w_xx, 11 w_yy, 12 w_zz
        out = np.array([
            vel_x, accel_x, vel_y, accel_y, vel_z, accel_z, dq0, dq1, dq2, dq3,
            accel_w_xx, accel_w_yy, accel_w_zz
        ])
        return out
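
The step function mentioned in the docstring integrates drone_eq over one control interval. A minimal usage sketch with SciPy's solve_ivp (env, the 10 ms interval, and the zero action are illustrative assumptions, not the author's setup):

import numpy as np
from scipy.integrate import solve_ivp

x0 = np.zeros(13)
x0[6] = 1.0                         # identity quaternion: level hover attitude
action = np.zeros(4)                # mid-range action on all four motors
sol = solve_ivp(env.drone_eq, (0, 0.01), x0, args=(action,))
x_next = sol.y[:, -1]               # 13-element state after one 10 ms step
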