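# Note: the listings below are excerpts and assume a surrounding project that
# provides `NonlinearProgram` (a CasADi/IPOPT wrapper), `casadi_sqrt`, the
# `gravity` vector and the `pymanoid.Process` base class. A minimal preamble
# consistent with the names used here might look as follows; the exact module
# paths are assumptions, not taken from the original source:
from __future__ import print_function

import casadi
import pymanoid

from casadi import sqrt as casadi_sqrt
from numpy import array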
def compute_Phi(self):
    """
    Compute the discretized profile Phi = (Phi_0, ..., Phi_N), with Phi_0 = 0,
    by solving a small nonlinear program that minimizes variations of the
    slopes lambda_i = (Phi_{i+1} - Phi_i) / Delta_i between consecutive steps.

    Returns
    -------
    Phi : array
        Vector of N + 1 values, with Phi[0] = 0.
    """
    nlp = NonlinearProgram()
    bc_integral_expr = 0.
    Phi_i = 0.  # Phi_0 = 0
    Phi_1 = None
    lambda_cost = 0.
    lambda_guess = self.omega_i**2
    lambda_prev = self.omega_f**2
    for i in xrange(self.N):
        Phi_next = nlp.new_variable(
            'Phi_%d' % (i + 1),  # from Phi_1 to Phi_N
            dim=1,
            init=[self.s_sq[i + 1] * lambda_guess],
            lb=[self.s_sq[i + 1] * self.lambda_min],
            ub=[self.s_sq[i + 1] * self.lambda_max])
        if Phi_1 is None:
            Phi_1 = Phi_next
        bc_integral_expr += self.Delta[i] / (
            casadi_sqrt(Phi_next) + casadi_sqrt(Phi_i))
        lambda_i = (Phi_next - Phi_i) / self.Delta[i]
        lambda_cost += (lambda_i - lambda_prev)**2
        lambda_prev = lambda_i
        nlp.add_constraint(
            Phi_next - Phi_i,
            lb=[self.Delta[i] * self.lambda_min],
            ub=[self.Delta[i] * self.lambda_max])
        Phi_i = Phi_next
    Phi_N = Phi_next
    nlp.add_equality_constraint(bc_integral_expr, self.bc_integral)
    nlp.add_equality_constraint(Phi_1, self.Delta[0] * self.omega_f**2)
    nlp.add_equality_constraint(Phi_N, self.omega_i**2)
    nlp.extend_cost(lambda_cost)
    nlp.create_solver()
    Phi_1_N = nlp.solve()
    Phi = array([0.] + list(Phi_1_N))  # prepend Phi_0 = 0
    if __debug__:
        assert len(Phi) == self.N + 1
        self.solve_times.append(nlp.solve_time)
    self.optimal_found = nlp.optimal_found
    return Phi
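# A hypothetical post-processing helper (not part of the original code): the
# loop above defines lambda_i = (Phi_{i+1} - Phi_i) / Delta_i, and the
# variable bounds scale with s_sq, which suggests Phi_i ~ s_i^2 * omega(s_i)^2.
# Under that assumption, per-step values can be recovered from a solved
# profile as follows:
import numpy

def unpack_phi(Phi, s, Delta):
    """Return per-step (lambda_i, omega_i) arrays from a solved Phi profile."""
    lambdas = numpy.diff(Phi) / Delta     # lambda_i = (Phi_{i+1} - Phi_i) / Delta_i
    omegas = numpy.sqrt(Phi[1:]) / s[1:]  # omega_i = sqrt(Phi_i) / s_i, skipping s_0 = 0
    return lambdas, omegas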
class NonlinearPredictiveController(pymanoid.Process):

    def __init__(self):
        super(NonlinearPredictiveController, self).__init__()
        self.nlp = NonlinearProgram(
            solver='ipopt',
            options={
                'fast_step_computation': 'yes',  # default: 'no'
                'fixed_variable_treatment': 'relax_bounds',
                'linear_solver': 'ma27',
                'max_cpu_time': 0.1,  # [s]
                'max_iter': 100,
                'mu_strategy': "adaptive",  # default is "monotone"
                # The adaptive strategy yields faster results in practice on
                # the COP and ZMP controller, while it seems to have no effect
                # on computation times of the (slower) Wrench controller.
                'print_level': 0,  # default: 5
                'warm_start_init_point': 'yes',  # default: 'no'
            })

    def add_com_boundary_constraint(self, contact, p, lb=0.5, ub=1.5):
        dist = casadi.dot(p - contact.p, contact.n)
        self.nlp.add_constraint(dist, lb=[lb], ub=[ub])

    def add_friction_constraint(self, contact, p, z):
        mu = contact.friction
        ZG = p - z
        ZG2 = casadi.dot(ZG, ZG)
        ZGn2 = casadi.dot(ZG, contact.n)**2
        slackness = ZG2 - (1 + mu**2) * ZGn2
        self.nlp.add_constraint(slackness, lb=[-self.nlp.infty], ub=[0])

    def add_linear_friction_constraints(self, contact, p, z):
        mu_inner = contact.friction / casadi.sqrt(2)
        ZG = p - z
        ZGt = casadi.dot(ZG, contact.t)
        ZGb = casadi.dot(ZG, contact.b)
        ZGn = casadi.dot(ZG, contact.n)
        c0 = ZGt - mu_inner * ZGn
        c1 = -ZGt - mu_inner * ZGn
        c2 = ZGb - mu_inner * ZGn
        c3 = -ZGb - mu_inner * ZGn
        self.nlp.add_constraint(c0, lb=[-self.nlp.infty], ub=[0])
        self.nlp.add_constraint(c1, lb=[-self.nlp.infty], ub=[0])
        self.nlp.add_constraint(c2, lb=[-self.nlp.infty], ub=[0])
        self.nlp.add_constraint(c3, lb=[-self.nlp.infty], ub=[0])

    def add_cop_constraint(self, contact, p, z, scaling=0.95):
        X = scaling * contact.X
        Y = scaling * contact.Y
        CZ, ZG = z - contact.p, p - z
        CZxZG = casadi.cross(CZ, ZG)
        Dx = casadi.dot(contact.b, CZxZG) / X
        Dy = casadi.dot(contact.t, CZxZG) / Y
        ZGn = casadi.dot(contact.n, ZG)
        slackness = Dx**2 + Dy**2 - ZGn**2
        self.nlp.add_constraint(slackness, lb=[-self.nlp.infty], ub=[-0.005])

    def add_linear_cop_constraints(self, contact, p, z, scaling=0.95):
        GZ = z - p
        for (i, v) in enumerate(contact.vertices):
            v_next = contact.vertices[(i + 1) % len(contact.vertices)]
            v = v + (1. - scaling) * (contact.p - v)
            v_next = v_next + (1. - scaling) * (contact.p - v_next)
            slackness = casadi.dot(v_next - v, casadi.cross(v - p, GZ))
            self.nlp.add_constraint(
                slackness, lb=[-self.nlp.infty], ub=[-0.005])

    @property
    def is_ready_to_switch(self):
        if self.nlp.optimal_found and self.cp_error < 2e-3:
            return True  # success
        return False

    def print_results(self):
        dT = self.preview.dT
        T_swing = sum(dT[:self.nb_steps / 2])
        T_total = sum(dT)
        print("\n")
        print("%14s: " % "dT's", [round(x, 3) for x in dT])
        print("%14s: " % "dT_min", "%.3f s" % self.dT_min)
        if self.swing_duration is not None:
            print("%14s: " % "T_swing", "%.3f s" % T_swing)
            print("%14s: " % "TTHS", "%.3f s" % self.swing_duration)
            print("%14s: " % "swing_dT_min", "%.3f s" % self.swing_dT_min)
        print("%14s: " % "T_total", "%.2f s" % T_total)
        print("%14s: " % "CP error", self.cp_error)
        print("%14s: " % "Comp. time", "%.1f ms" % (1000 * self.nlp.solve_time))
        print("%14s: " % "Iter. count", self.nlp.iter_count)
        print("%14s: " % "Status", self.nlp.return_status)
        print("\n")

    def update_dT_max(self, dT_max):
        """
        Update the maximum variable-timestep duration.

        Parameters
        ----------
        dT_max : scalar
            New maximum duration.
""" for k in range(self.nb_steps): dT_min = self.dT_min if 2 * k < self.nb_steps: dT_min = self.swing_dT_min self.nlp.update_variable_bounds('dT_%d' % k, [dT_min], [dT_max]) self.dT_max = dT_max def update_dT_min(self, dT_min): """ Update the minimum variable-timestep duration. Parameters ---------- dT_min : scalar New minimum duration. """ dT_max = self.dT_max if dT_min < self.swing_dT_min: self.update_swing_dT_min(dT_min) for k in range(self.nb_steps / 2, self.nb_steps): self.nlp.update_variable_bounds('dT_%d' % k, [dT_min], [dT_max]) self.dT_min = dT_min def update_swing_dT_min(self, dT_min): """ Update the minimum variable-timestep duration for the swing phase. Parameters ---------- dT_min : scalar New minimum duration. Only affects the swing phase. """ dT_max = self.dT_max for k in range(self.nb_steps / 2): self.nlp.update_variable_bounds('dT_%d' % k, [dT_min], [dT_max]) self.swing_dT_min = dT_min def update_swing_duration(self, T): """ Update the duration of the swing phase. Parameters ---------- T : scalar New duration. """ if self.nlp.has_constraint('T_swing'): self.swing_duration = T self.nlp.update_constraint_bounds('T_swing', [T], [self.T_max])
def compute_Phi(self):
    """
    Compute the discretized profile Phi = (Phi_0, ..., Phi_N), with Phi_0 = 0,
    by solving a small nonlinear program that minimizes variations of the
    slopes lambda_i = (Phi_{i+1} - Phi_i) / Delta_i, subject to an equality
    constraint on the discretized 'bc' integral.

    Returns
    -------
    Phi : array
        Vector of N + 1 values, with Phi[0] = 0.
    """
    nlp = NonlinearProgram()
    g = -gravity[2]  # gravity vector assumed to point downward, so g > 0
    bc_integral_expr = 0.
    Phi_i = 0.  # Phi_0 = 0
    Phi_1 = None
    lambda_cost = 0.
    lambda_f = self.omega_f**2
    lambda_guess = lambda_f
    lambda_prev = lambda_f
    for i in xrange(self.N):
        Phi_lb = self.s_sq[i + 1] * self.lambda_min
        Phi_ub = self.s_sq[i + 1] * self.lambda_max
        if i == self.N - 1:
            if self.omega_i_min is not None:
                Phi_lb = max(Phi_lb, self.omega_i_min**2)
            if self.omega_i_max is not None:
                Phi_ub = min(Phi_ub, self.omega_i_max**2)
        Phi_next = nlp.new_variable(
            'Phi_%d' % (i + 1),  # from Phi_1 to Phi_N
            dim=1,
            init=[self.s_sq[i + 1] * lambda_guess],
            lb=[Phi_lb],
            ub=[Phi_ub])
        if Phi_1 is None:
            Phi_1 = Phi_next
        bc_integral_expr += self.Delta[i] / (
            casadi_sqrt(Phi_next) + casadi_sqrt(Phi_i))
        lambda_i = (Phi_next - Phi_i) / self.Delta[i]
        lambda_cost += (lambda_i - lambda_prev)**2
        lambda_prev = lambda_i
        nlp.add_constraint(
            Phi_next - Phi_i,
            lb=[self.Delta[i] * self.lambda_min],
            ub=[self.Delta[i] * self.lambda_max])
        Phi_i = Phi_next
    Phi_N = Phi_next
    bc_cvx_obj = bc_integral_expr - (self.z_bar / g) * casadi_sqrt(Phi_N)
    nlp.add_equality_constraint(bc_cvx_obj, self.zd_bar / g)
    nlp.add_equality_constraint(Phi_1, self.Delta[0] * lambda_f)
    nlp.extend_cost(lambda_cost)
    nlp.create_solver()
    Phi_1_N = nlp.solve()
    Phi = array([0.] + list(Phi_1_N))  # prepend Phi_0 = 0
    if __debug__:
        self.solve_times.append(nlp.solve_time)
    self.optimal_found = nlp.optimal_found
    return Phi
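# A hypothetical sanity check (not in the original code): rearranging the
# equality constraint above, a solved profile should satisfy
#   sum_j Delta_j / (sqrt(Phi_{j+1}) + sqrt(Phi_j)) == (zd_bar + z_bar * sqrt(Phi_N)) / g
# where sqrt(Phi_N) plays the role of the initial omega_i, whose bounds are
# tightened by omega_i_min / omega_i_max in the last loop iteration above.
import numpy

def check_bc_constraint(Phi, Delta, z_bar, zd_bar, g=9.81, tol=1e-6):
    """Return True if Phi satisfies the discretized 'bc' equality constraint."""
    sqrt_Phi = numpy.sqrt(Phi)
    bc_integral = numpy.sum(Delta / (sqrt_Phi[1:] + sqrt_Phi[:-1]))
    return abs(bc_integral - (zd_bar + z_bar * sqrt_Phi[-1]) / g) < tol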