def p_patron(p):
    ''' patron : patron_A hyper_exp '''
    # PLY action: close one "patron" (pattern/case) arm of a loop. Emits the
    # comparison quad between the loop control value and this arm's
    # expression, assigns the arm's value back into the control variable,
    # emits the loop-back GoTo, and backpatches the pending forward jump.
    global quadruples
    global variables
    global quadCont
    global ranges
    global patrons
    global tempCont
    global scopeTable
    # NOTE(review): tempCont and scopeTable are declared global but never
    # used here -- presumably leftovers; confirm before removing.
    up = pop_from_variables(p)
    low = ranges[-1]  # peek (not pop) the current loop control value
    operator = pop_from_patrons(p)
    result = valid_operation(operator, low, up)
    if result == -1:
        # semantic error: incompatible operand types for `operator`
        print("Invalid operation at {}".format(p.lexer.lineno))
        sys.exit(0)
    else:
        quadruples.append(Quad(operator, up, low, result))
        variables.append(result)
        quadCont += 1
    value = pop_from_variables(p)
    control_var = pop_from_ranges(p)
    # store the arm's value into the loop control variable
    quadruples.append(Quad('=', None, value, control_var))
    quadCont += 1
    returning = jumps.pop()  # quad whose jump target is still pending
    goto = jumps.pop()       # target index for the loop-back jump
    quadruples.append(Quad('GoTo', None, None, goto))
    quadCont += 1
    quadruples[returning].result = quadCont  # backpatch the pending jump
    p[0] = ""
    for x in range(1, len(p)):
        p[0] += str(p[x])
    p[0]  # NOTE(review): bare expression statement -- has no effect
def p_punt_function_call_end(p):
    ''' punt_function_call_end : empty '''
    # PLY action: finish a function call. Emits the GoSub to the callee's
    # start quad and, for non-void callees, a SetReturnValue quad copying the
    # return value into a fresh temporary.
    global quadruples
    global quadCont
    global funcName
    global scopeTable
    global variables
    global operators
    global actualScope
    global recursiveCalls
    if len(operators) > 0:
        operators.pop()  # discard the call's pending operator marker, if any
    dir = scopeTable.scopes[funcName][2]  # callee's starting quad index (shadows builtin `dir`)
    quadruples.append(Quad('GoSub', None, None, dir))
    quadCont += 1
    if scopeTable.scopes[funcName][0] != 'void':
        returnval = scopeTable.scopes[funcName][3]
        # NOTE(review): `returnval` is never used afterwards -- confirm intent.
        if funcName == actualScope:
            # recursive call: remember this quad so it can be patched after
            # the function's own return address is known
            direction = calc_new_direction(scopeTable.scopes[funcName][0], actualScope)
            variables.append(direction)
            quadruples.append(Quad('SetReturnValue', None, None, direction))
            recursiveCalls.append(quadCont)
            quadCont += 1
        else:
            # NOTE(review): uses scopes[actualScope][0] (the *caller's* type)
            # rather than the callee's -- looks asymmetric with the branch
            # above; confirm this is intentional.
            direction = calc_new_direction(scopeTable.scopes[actualScope][0], actualScope)
            quadruples.append(Quad('SetReturnValue', None, None, direction))
            variables.append(direction)
            quadCont += 1
    p[0] = p[1]
def p_function_D(p): # Return value for function
    ''' function_D : WITH hyper_exp | empty '''
    # PLY action: emit the function epilogue. Non-void functions return the
    # value of the WITH-expression (type-checked against the declared return
    # type); every function ends with return + EndProc quads.
    global actualScope
    global scopeTable
    global quadruples
    global quadCont
    global funcType
    if funcType != 'void':
        result = pop_from_variables(p)
        result_type = get_type_by_direction(result)
        if funcType == result_type:
            quadruples.append(Quad('return', None, None, result))
            # fix: the counter was not incremented for this quad, desyncing
            # quadCont from len(quadruples) on every non-void function
            quadCont += 1
            scopeTable.scopes[actualScope][3] = result  # record return slot
        else:
            print("Type mismatch at {}".format(p.lexer.lineno))
            sys.exit(0)
    else:
        quadruples.append(Quad('return', None, None, None))
        quadCont += 1
    quadruples.append(Quad('EndProc', None, None, None))
    quadCont += 1
    # concatenate children for the synthesized attribute
    p[0] = "".join(str(p[x]) for x in range(1, len(p)))
def p_loop_value(p):
    ''' loop_value : hyper_exp '''
    # PLY action: open a conditional loop test. Emits the comparison between
    # the loop control value and this expression, then a GoToF whose target
    # is backpatched later (its index is pushed on `jumps`).
    global variables
    global conditions
    global quadruples
    global quadCont
    global jumps
    global ranges
    up = pop_from_variables(p)
    low = ranges[-1]  # peek the current loop control value
    operator = conditions.pop()
    result = valid_operation(operator, low, up)
    if result == -1:
        print("Invalid operation at {}".format(p.lexer.lineno))
        sys.exit(0)
    else:
        quadruples.append(Quad(operator, up, low, result))
        quadCont += 1
        variables.append(result)
    # record the index of the GoToF emitted just below for backpatching
    jumps.append(quadCont)
    bool = pop_from_variables(p)  # NOTE(review): shadows builtin `bool`
    quadruples.append(Quad('GoToF', None, bool, None))
    quadCont += 1
    p[0] = p[1]
def __init__(self, *args, **kwargs):
    # Handler setup: load the page template, unpack CLI options (from the
    # module-level `_args`), and prepare face-tracking and OpenGL resources.
    self.boundary = '--boundarydonotcross'
    # fix: context manager closes the template file (was open(...).read(),
    # which leaks the handle until GC)
    with open('index.html', 'r') as template:
        self.html = template.read()
    # Handle Arguments
    self.input_path = _args.i[0]
    self.rotation = _args.r[0]
    # fix: split the WxH capture-size argument once instead of twice
    dims = _args.c[0].split('x')
    self.output_width = int(dims[0])
    self.output_height = int(dims[1])
    self.frame_rate = _args.f[0]
    self.frame_skip_rate = _args.k[0]
    self.recognize_scale = 0.2
    self.predictor = dlib.shape_predictor('../shapes/shape_predictor_68_face_landmarks.dat')
    self.face_cascade = toolbox.loadCascade('haarcascade_frontalface_default.xml')
    # Define skipping vars
    self.skip_frames = None
    self.skip_frame_x = None
    self.skip_frame_y = None
    self.skip_rect = None
    self.skip_points = None
    # OpenGL
    self.quad = Quad()
    self.quad_program = ShaderProgram(fragment=self.quad.fragment, vertex=self.quad.vertex)
    self.quad.loadVBOs(self.quad_program)
    self.quad.loadElements()
    super(CustomHandler, self).__init__(*args, **kwargs)
def p_factor(p):
    ''' factor : minus value | OPEN_PARENTHESIS puntOP hyper_exp CLOSE_PARENTHESIS puntCP '''
    # PLY action: a factor (possibly negated value, or parenthesized
    # expression). For the value form, push its address; if a pending unary
    # '-' is active (negativeflag), emit the negation quad.
    global variables
    global scopeTable
    global actualScope
    global actual_value
    global quadCont
    global quadruples
    global operators
    global negativeflag
    if p[1] != '(' and check_if_exist(p[2]):
        # check_if_exist sets `actual_value` to the identifier's address
        variables.append(actual_value)
    if len(operators) > 0:
        if operators[-1] == '-' and negativeflag ==1:
            oper = pop_from_operators(p)
            var1 = pop_from_variables(p)
            # NOTE(review): type check uses (var1, var1) but the emitted quad
            # uses literal 0 as the second operand -- presumably a unary-minus
            # encoding (0 - var1); confirm against the VM's quad semantics.
            result = valid_operation(oper, var1, var1)
            if result == -1:
                print("Invalid operation at {}".format(p.lexer.lineno))
                sys.exit(0)
            else:
                quadruples.append(Quad(oper,var1,0,result))
                variables.append(result)
                quadCont += 1
            flag = 0  # NOTE(review): dead local -- never read; confirm before removing
    p[0] = ""
    for x in range(1, len(p)):
        p[0] += str(p[x])
    p[0]  # NOTE(review): bare expression statement -- has no effect
def initGL(output_width, output_height):
    # One-time OpenGL/GLUT setup: create the window, print driver info,
    # configure texture filtering, and build the module-level fullscreen quad
    # (QUAD) plus its shader program (QUAD_PROGRAM).
    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_3_2_CORE_PROFILE | GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH)
    glutInitWindowSize(output_width, output_height)
    glutCreateWindow("Virtual Window")
    print('')
    print('Vendor: ' + glGetString(GL_VENDOR).decode('utf-8'))
    print('Renderer: ' + glGetString(GL_RENDERER).decode('utf-8'))
    print('OpenGL Version: ' + glGetString(GL_VERSION).decode('utf-8'))
    print('Shader Version: ' + glGetString(GL_SHADING_LANGUAGE_VERSION).decode('utf-8'))
    # bail out if the driver exposes no shader-object entry points
    if not glUseProgram:
        print('Missing Shader Objects!')
        sys.exit(1)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)
    global QUAD
    QUAD = Quad()
    global QUAD_PROGRAM
    QUAD_PROGRAM = ShaderProgram(fragment=QUAD.fragment, vertex=QUAD.vertex)
    QUAD.loadVBOs(QUAD_PROGRAM)
    QUAD.loadElements()
def simulate_with_actions(state, N, action_schedule):
    """Roll the quad dynamics forward N-1 steps under a fixed action schedule.

    Returns an array of N states (including the initial one). Gravity
    compensation (M * G) is added to the thrust channel of every action.
    """
    quad_dev = Quad(state)
    trajectory = [state.copy()]
    # fix: operate on a copy -- the original mutated the caller's
    # action_schedule in place via `+=`, corrupting it for reuse
    schedule = np.array(action_schedule, dtype=float)
    schedule[:, 2] += M * G  # gravity-compensating thrust offset
    for i in range(N - 1):
        state, _ = quad_dev.step(state, schedule[i])
        trajectory.append(state.copy())
    return np.array(trajectory)
def p_punt_start_litaf(p):
    ''' punt_start_litaf : empty '''
    # Open the global 'constants' scope and emit a placeholder GoTo quad
    # whose target gets backpatched once the program entry point is known.
    global quadruples
    global quadCont
    create_scope('constants', 'void', quadCont, p)
    pending_jump = Quad('GoTo', None, None, None)
    quadruples.append(pending_jump)
    quadCont = quadCont + 1
    p[0] = p[1]
def p_puntElseIfGoToF(p):
    ''' puntElseIfGoToF : empty '''
    # Record this quad's index for later backpatching, then emit a
    # conditional jump on the expression just evaluated.
    global quadruples
    global variables
    global quadCont
    global jumps
    jumps.append(quadCont)
    condition = pop_from_variables(p)
    quadruples.append(Quad('GoToF', None, condition, None))
    quadCont = quadCont + 1
    p[0] = p[1]
def p_puntUntil(p):
    ''' puntUntil : empty '''
    # Emit the cycle's exit test: jump-if-false on the condition value, with
    # the target left pending (its index is pushed on `jumps`).
    global variables
    global quadruples
    global quadCont
    global jumps
    condition = pop_from_variables(p)
    quadruples.append(Quad('GoToF', None, condition, None))
    jumps.append(quadCont)
    quadCont = quadCont + 1
    p[0] = p[1]
def p_puntUntilEnd(p):
    ''' puntUntilEnd : empty '''
    # PLY action: close an until-style cycle. Backpatches the pending GoToF
    # to exit past the loop, then emits the GoTo back to the condition.
    global quadruples
    global quadCont
    global jumps
    end = jumps.pop() # Pops GOTOF QUAD and fills the missing jump with actual counter quadruple
    # NOTE(review): the counter is incremented *before* the GoTo below is
    # appended, unlike sibling actions which increment after appending --
    # this pre-counts the GoTo so the GoToF skips past it; confirm against
    # the VM's quad numbering convention.
    quadCont += 1
    quadruples[end].result = quadCont
    returning = jumps.pop() # Pops QUAD for generating GOTO QUAD to re evaluation of the conditional exp of the cycle
    quadruples.append(Quad('GoTo', None, None, returning))
    p[0] = p[1]
def p_function_call_hyper_exp(p):
    ''' function_call_hyper_exp : punt_false_bottom_function hyper_exp '''
    # One more call argument has been evaluated: emit its PARAM quad,
    # addressed by its 1-based position in the argument list.
    global quadCont
    global contParams
    global quadruples
    global variables
    global operators
    contParams = contParams + 1
    argument = pop_from_variables(p)
    slot = 'param' + str(contParams)
    quadruples.append(Quad('PARAM', None, argument, slot))
    quadCont = quadCont + 1
    p[0] = p[1]
def p_puntElse(p):
    ''' puntElse : empty '''
    # Start of an else-part: emit an unconditional jump over it (target
    # pending, recorded on `jumps`), then backpatch the if's false-branch
    # jump to land right after this GoTo.
    global quadruples
    global variables
    global quadCont
    global jumps
    quadruples.append(Quad('GoTo', None, None, None))
    false_branch = jumps.pop()
    jumps.append(quadCont)
    quadCont = quadCont + 1
    quadruples[false_branch].result = quadCont
    p[0] = p[1]
def p_puntElseIfGOTO(p):
    ''' puntElseIfGOTO : empty '''
    # else-if chaining: emit a pending GoTo over the next branch (index
    # recorded on `jumps`) and backpatch the previous branch's jump here.
    global quadruples
    global variables
    global quadCont
    global jumps
    previous_branch = jumps.pop()
    jumps.append(quadCont)
    quadruples.append(Quad('GoTo', None, None, None))
    quadCont = quadCont + 1
    quadruples[previous_branch].result = quadCont
    p[0] = p[1]
def p_lecture_ID(p):
    ''' lecture_ID : ID '''
    # Emit a Lecture (read-input) quad targeting the identifier's address.
    # check_if_exist also sets the global `actual_value` to that address.
    global quadruples
    global quadCont
    global actual_value
    if not check_if_exist(p[1]):
        print("Syntax error at line {}: undeclared variable '{}'".format(p.lexer.lineno, p[1]))
        sys.exit(0)
    target = actual_value
    quadruples.append(Quad('Lecture', None, None, target))
    quadCont = quadCont + 1
    p[0] = p[1]
def p_function_call_name(p):
    ''' function_call_name : FUNCTION_ID '''
    # Start of a function call: record the callee's name and emit its ERA
    # (activation-record allocation) quad.
    # fixes: removed duplicate `global funcName` declaration and the unused
    # `global block_flag`; the result of the scope lookup was bound to an
    # unused name shadowing the builtin `type`.
    global funcName
    global quadruples
    global scopeTable
    global quadCont
    funcName = p[1]
    # keep the lookup: it raises KeyError if the function was never declared
    _ = scopeTable.scopes[funcName][0]
    quadruples.append(Quad('ERA', None, None, funcName))
    quadCont += 1
    p[0] = p[1]
def p_writing_A(p):
    ''' writing_A : hyper_exp writing_A1 '''
    # Reset the per-statement write counter, then queue a Writing quad for
    # the value produced by the expression.
    global variables
    global writCont
    global output
    writCont = 0
    to_write = pop_from_variables(p)
    output.append(Quad('Writing', None, None, to_write))
    writCont = writCont + 1
    p[0] = "".join(str(p[x]) for x in range(1, len(p)))
def __init__(self, Stable, wm, quad_pos=(0, 0, 0)):
    # Build the physics scene: ODE world/space with gravity, a static floor
    # plane (plus its visual box), the quad body, and the stabilization
    # algorithm wrapping it.
    # Stable: class (not instance) implementing the stabilizer -- presumably
    # called as Stable(quad); confirm its expected constructor signature.
    # wm: stored as-is; role not visible from this block.
    self.world = World()
    self.world.setGravity((0, -9.81, 0))  # y-down gravity, m/s^2
    self.space = Space()
    self.contactgroup = JointGroup()
    self.wm = wm
    # draw floor
    self.floor = GeomPlane(self.space, (0, 1, 0), -1)  # plane y = -1
    box(pos=(0, -1, 0), width=2, height=0.01, length=2)  # visual only
    # Draw Quad
    self.quad = Quad(self.world, self.space, pos=quad_pos)
    # Init Algorithm
    self.stable = Stable(self.quad)
    # stop autoscale of the camera
    scene.autoscale = False
def p_bool_values_cycle(p):
    ''' bool_values_cycle : bool_values '''
    # Compare the value just evaluated against the boolean constant's stored
    # address (from the 'constants' scope) and emit the equality quad.
    global quadruples
    global quadCont
    global variables
    global tempCont
    up = pop_from_variables(p)
    constant_entry = scopeTable.scopes['constants'][1].vars[p[1]]
    low = constant_entry[1]
    comparator = "=="
    result = valid_operation(comparator, low, up)
    if result == -1:
        print("Invalid operation at {}".format(p.lexer.lineno))
        sys.exit(0)
    else:
        quadruples.append(Quad(comparator, up, low, result))
        variables.append(result)
        quadCont = quadCont + 1
    p[0] = p[1]
def save_animation(self, animation_name):
    """Write the queued poses to animations/<name>.txt and register the
    animation in self.saved_animations.

    File format per pose: one line of space-separated left+right joint
    values (commas stripped), then one line with the two gripper values.
    Shows a warning and does nothing if the name is empty.
    """
    if animation_name == '':
        self.show_warning('Please insert name for animation')
        return
    # fix: build the path in one join instead of join + string concat
    path = os.path.join(self.dir, 'animations', animation_name + '.txt')
    l_animation_queue = []
    r_animation_queue = []
    l_gripper_queue = []
    r_gripper_queue = []
    # fix: context manager guarantees the file is closed even on error
    # (the original open()/close() pair leaked the handle on exception)
    with open(path, 'w') as anim_file:
        for i in range(self.list_widget.count()):
            pose_name = self.list_widget.item(i).text()
            pose = self.animation_map[pose_name]  # hoist repeated dict lookup
            anim_file.write(
                re.sub(',', '',
                       str(pose.left).strip('[]') + ' ' +
                       str(pose.right).strip('[]')))
            anim_file.write('\n')
            anim_file.write(
                str(pose.left_gripper) + ' ' + str(pose.right_gripper))
            l_animation_queue.append(pose.left)
            r_animation_queue.append(pose.right)
            l_gripper_queue.append(pose.left_gripper)
            r_gripper_queue.append(pose.right_gripper)
            anim_file.write('\n')
    self.saved_animations[animation_name] = Quad(
        l_animation_queue, r_animation_queue,
        l_gripper_queue, r_gripper_queue)
    self.saved_animations_list.addItem(animation_name)  # Bug? Multiple entries
    # Reset the pending queue
    self.l_animation_queue = []
    self.r_animation_queue = []
def extraction_algorithm(fine_data, resolutions, dimensions): print "### Dual Contouring ###" [verts_out_dc, quads_out_dc, manifolds, datasets] = tworesolution_dual_contour(fine_data, resolutions, dimensions) print "### Dual Contouring DONE ###" N_quads = { 'coarse': quads_out_dc['coarse'].__len__(), 'fine': quads_out_dc['fine'].__len__() } N_verts = { 'coarse': verts_out_dc['coarse'].__len__(), 'fine': verts_out_dc['fine'].__len__() } quads = { 'coarse': [None] * N_quads['coarse'], 'fine': quads_out_dc['fine'] } verts = { 'coarse': verts_out_dc['coarse'], 'fine': verts_out_dc['fine'] } # todo substitute with vertex objects for i in range(N_quads['coarse']): quads['coarse'][i] = Quad(i, quads_out_dc['coarse'], verts_out_dc['coarse']) assert quads['coarse'].__len__() > 0 print "### Projecting Datapoints onto coarse quads ###" # do projection of fine verts on coarse quads param = create_parameters(verts, quads) #export_results.export_as_csv(param,'./PYTHON/NURBSReconstruction/DualContouring/plotting/param') print "### Projecting Datapoints onto coarse quads DONE ###" return verts_out_dc, quads_out_dc, quads, param, datasets
def p_puntSum(p):
    ''' puntSum : empty '''
    # PLY action: if a '+' or '-' is pending on the operator stack, pop its
    # two operands and emit the arithmetic quad (standard precedence point
    # for additive operators).
    global operators
    global quadruples
    global quadCont
    global variables
    if len(operators) > 0:
        if operators[-1] == '+' or operators[-1] == '-':
            operator = pop_from_operators(p)
            operand2 = pop_from_variables(p)  # right operand (popped first)
            operand1 = pop_from_variables(p)  # left operand
            result = valid_operation(operator, operand1, operand2)
            if result == -1:
                print("Invalid operation at {}".format(p.lexer.lineno))
                sys.exit(0)
            else:
                # NOTE(review): the quad stores operand2 before operand1,
                # the reverse of the valid_operation call above -- confirm
                # the VM expects (op, right, left, result) ordering.
                quadruples.append(Quad(operator, operand2, operand1, result))
                variables.append(result)
                quadCont += 1
    p[0] = p[1]
def p_assign(p):
    ''' assign : ID EQUAL appendEqual assign_A '''
    # Emit the assignment quad: store the evaluated right-hand side into the
    # address of the identifier on the left (resolved via check_if_exist,
    # which sets the global `actual_value`).
    global operators
    global quadruples
    global variables
    global quadCont
    global actual_value
    op = pop_from_operators(p)
    rhs = pop_from_variables(p)
    if check_if_exist(p[1]):
        lhs = actual_value
        if valid_operation(op, lhs, rhs) == -1:
            print("Invalid operation at {}".format(p.lexer.lineno))
            sys.exit(0)
        else:
            quadruples.append(Quad(op, None, rhs, lhs))
            quadCont = quadCont + 1
    p[0] = "".join(str(p[x]) for x in range(1, len(p)))
from quad import Quad
from time import sleep

if __name__ == "__main__":
    # Load the quad described by quad.config, command leg/servo 0 to
    # position (150, 150, 0), then hold for 10 s so the move completes
    # before the process exits.
    q = Quad("quad.config")
    q[0].set(150, 150, 0)
    sleep(10)
def main():
    # Track a curved reference trajectory with a simplified quadrotor using
    # one of three controllers (CONTROLLER global: 'MPC', 'LQR' or 'PID'),
    # plotting the 3-D flight path live and the tracking error at the end.
    simulation_time = 20
    ## prepare the trajectory(course) to track
    idx = np.arange(0, 4 * simulation_time, dl)
    course_pos = curve_pos(idx)
    course_vel = curve_vel(idx)
    speed = 0.5  # m/s
    course_vel = course_vel / np.linalg.norm(course_vel, axis=0) * (speed + np.random.rand())  # normalize with the same speed, but not direction.
    course_vel[:, 0] = 0  # avoid nan
    course_vel[:, -1] = 0  # stop
    course = np.concatenate((course_pos, course_vel), axis=0)  # desired state with dim 6, shape [dim, timeslot]
    # init state and target state
    init_state = course[:, 0] + np.array([1., 2., 0., 0., 0., 0.])  #np.array([-1., 1., 0., 0., 0., 0.])
    state = init_state
    nearest, _ = calc_nearest_index(state, course, 0)
    target_states = course[:, nearest]
    # quadrotor object
    quad = Quad(init_state)
    # dimension
    nu = 4
    nx = init_state.shape[0]  # 6
    # controller parameters setting for both lqr and linear mpc
    u = np.zeros(nu)
    horizon = 20  # predict and control horizon in mpc
    Q = np.array([20. , 20., 20., 8, 8, 8])
    QN = Q
    # Q = np.array([8., 8., 8., 0.8, 0.8, 0.8])
    # QN = np.array([10., 10., 10., 1.0, 1.0, 1.0])
    R = np.array([6., 6., 6.])
    Ad, Bd = linearized_model(state)  # actually use linear time-invariant model linearized from the equilibrium point
    # record state
    labels = ['x', 'y', 'z', 'vx', 'vy', 'vz']  # convenient for plot
    state_control = {x: [] for x in labels}
    target_chosen = {x: [] for x in labels}
    for ii in range(6):
        target_chosen[labels[ii]].append(target_states[ii])
    err = []  # tracking error
    # visualization
    fig = plt.figure()
    ax = plt.axes(projection='3d')
    # control loop
    timestamp = 0.0
    while timestamp <= simulation_time:
        print("timestamp", timestamp)
        if TRACKING:
            # moving reference window along the course
            target_states, nearest, chosen_idxs = calc_ref_trajectory(state, course, nearest, horizon)
        else:
            # fixed hover setpoint at 10 m altitude
            target_states = np.array([0, 0, 10, 0, 0, 0])
        if CONTROLLER == 'MPC':
            mpc_policy = mpc(state, target_states, Ad, Bd, horizon, Q, QN, R, TRACKING)
            nominal_action, actions, nominal_state = mpc_policy.solve()
            action = nominal_action  #- np.dot(K,(target_states - nominal_state))
            roll_r, pitch_r, thrust_r = action
            # NOTE(review): sign flips map controller axes onto the quad's
            # input convention -- confirm against Quad.step's definition.
            u[0] = - pitch_r
            u[1] = - roll_r
            u[2] = 0.0
            u[3] = thrust_r + M * G  # gravity compensation on thrust
        elif CONTROLLER == 'LQR':
            lqr_policy = FiniteHorizonLQR(Ad, Bd, Q, R, QN, horizon=horizon)  # instantiate a lqr controller
            lqr_policy.set_target_state(target_states)  ## set target state to koopman observable state
            lqr_policy.sat_val = np.array([PI/8., PI/8., 0.75])
            K, ustar = lqr_policy(state)
            u[0] = - ustar[1]
            u[1] = - ustar[0]
            u[2] = 0.0
            u[3] = ustar[2] + M * G
            print("K", K)
        elif CONTROLLER == 'PID':
            # Compute control errors
            x, y, z, dx, dy, dz = state
            x_r, y_r, z_r, dx_r, dy_r, dz_r = target_states
            ex = x - x_r
            ey = y - y_r
            ez = z - z_r
            dex = dx - dx_r
            dey = dy - dy_r
            dez = dz - dz_r
            # PD gains from damping ratio xi and natural frequency wn
            xi = 1.2
            wn = 3.0
            Kp = - wn * wn
            Kd = - 2 * wn * xi
            Kxp = 1.2 * Kp
            Kxd = 1.2 * Kd
            Kyp = Kp
            Kyd = Kd
            Kzp = 0.8 * Kp
            Kzd = 0.8 * Kd
            pitch_r = Kxp * ex + Kxd * dex
            roll_r = Kyp * ey + Kyd * dey
            thrust_r = (Kzp * ez + Kzd * dez + 9.8) * 0.44
            u[0] = pitch_r
            u[1] = roll_r
            u[2] = 0.
            u[3] = thrust_r + M * G
        next_state = quad.step(state, u)
        state = next_state
        timestamp = timestamp + dt
        if TRACKING:
            err.append(state[:3] - target_states[:3, -1])
        else:
            err.append(state[:3] - target_states[:3])
        # record
        for ii in range(len(labels)):
            state_control[labels[ii]].append(state[ii])
            if TRACKING:
                target_chosen[labels[ii]].append(target_states[ii, -1])
            else:
                target_chosen[labels[ii]].append(target_states[ii])
        ## plot
        if True:  #timestamp > dt: #
            # plot in real time
            plt.cla()
            x = np.append(init_state[0], state_control['x'])
            y = np.append(init_state[1], state_control['y'])
            z = np.append(init_state[2], state_control['z'])
            final_idx = len(x) - 1
            ax.plot3D(x, y, z, label='Quadcopter flight trajectory')
            ax.plot3D([init_state[0]], [init_state[1]], [init_state[2]], label='Initial position', color='blue', marker='x')
            ax.plot3D([x[final_idx]], [y[final_idx]], [z[final_idx]], label='Final position', color='green', marker='o')
            if TRACKING:
                plt_len = chosen_idxs[-1] + 100
                ax.plot3D(course[0, :plt_len], course[1, :plt_len], course[2, :plt_len])
                # ax.scatter3D([course[0, chosen_idxs[0]]], [course[1, chosen_idxs[0]]], [course[2, chosen_idxs[0]]], color='red', label='chosen target flight trajectory', marker='o')
                for i in range(horizon):
                    ax.scatter3D([course[0, chosen_idxs[i]]], [course[1, chosen_idxs[i]]], [course[2, chosen_idxs[i]]], color='red', marker='o')
            # NOTE(review): indentation reconstructed -- this scatter of the
            # current target appears to run every frame; confirm placement.
            ax.scatter3D([target_states[0]], [target_states[1]], [target_states[2]], color='red', marker='o')
            plt.legend()
            plt.pause(0.01)
    # final tracking-error plot, one curve per position axis
    err = np.stack(err)
    plt.figure()
    plt.plot(err[:,0], label='x')
    plt.plot(err[:,1], label='y')
    plt.plot(err[:,2], label='z')
    plt.legend()
    plt.show()
def main():
    # Entry point for DQN quadrotor training/testing against a V-REP
    # simulation: parse CLI options, wire up the helper/DQN/runner objects,
    # then train or test while the simulator connection is alive.
    parser = argparse.ArgumentParser()
    parser.add_argument("--epoch_size", type=int, default=10, help="Total training epochs")
    parser.add_argument("--episode_size", type=int, default=50000, help="Training episodes per epoch")
    parser.add_argument("--epsilon", type=float, default=0.1, help="Greedy Epsilon starting value")
    parser.add_argument("--gamma", type=float, default=0.99, help="DQN gamma starting value")
    parser.add_argument("--replay_buffer", type=float, default=100, help="Replay buffer size")
    parser.add_argument("--load_model", action='store_true', default=False, help="Load saved model")
    parser.add_argument("--test", action='store_true', default=False, help="Testing phase")
    parser.add_argument("--nodisplay", action='store_true', default=False, help="Show V-REP display")
    parser.add_argument("--cuda", action='store_true', default=False, help="Use CUDA")
    parser.add_argument("--viz", action='store_true', default=False, help="Use Visdom Visualizer")
    args = parser.parse_args()
    print("Using Parameters:")
    print("Epoch Size: %d" % args.epoch_size)
    print("Episode Size: %d" % args.episode_size)
    print("Epsilon: %f" % args.epsilon)
    print("Gamma: %f" % args.gamma)
    print("Replay buffer size: %f" % args.replay_buffer)
    print("Testing Phase: %s" % str(args.test))
    print("Using CUDA: %s\n" % str(args.cuda))
    # Initialize classes
    control_quad = QuadHelper()
    dqn_quad = QuadDQN(args.cuda, args.epoch_size, args.episode_size)
    if args.viz:
        viz = Visualizer()
        main_quad = Quad(dqn_quad=dqn_quad, control_quad=control_quad, visualizer=viz)
    else:
        main_quad = Quad(dqn_quad=dqn_quad, control_quad=control_quad, visualizer=None)
    # Argument parsing
    dqn_quad.buffer_size = args.replay_buffer
    dqn_quad.eps = args.epsilon
    dqn_quad.gamma = args.gamma
    if args.load_model:
        dqn_quad.load_wts('dqn_quad.pth')
    if args.nodisplay:
        control_quad.display_disabled = True
        main_quad.display_disabled = True
    # run while the V-REP client connection is alive (-1 == disconnected)
    while (vrep.simxGetConnectionId(control_quad.sim_quad.clientID) != -1):
        # append a timestamped header to the session log
        with open('dqn_outputs.txt', 'a') as the_file:
            log_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
            the_file.write('\n************* SAVE FILE %s *************\n' % log_time)
        if args.test:
            print("Testing Quadrotor")
            # Test our trained quadrotor
            main_quad.mode = 'test'
            dqn_quad.load_wts('dqn_quad.pth')
            main_quad.test_quad()
        else:
            # Train quadcopter
            epoch = 0
            while epoch < dqn_quad.epoch_size:
                main_quad.run_one_epoch(epoch)
                print("Epoch reset")
                epoch += 1
                main_quad.task_every_n_epochs(epoch)
            print('\n')
            print("Finished training")
            # Test quadrotor
            main_quad.mode = "test"
            main_quad.test_quad()
    control_quad.sim_quad.exit_sim()
    print("V-REP Exited...")
#faces = np.array(np.genfromtxt('sphere_quads_coarse.csv', delimiter=';')) #verts = np.array(np.genfromtxt('sphere_verts_coarse.csv', delimiter=';')) #faces = np.array([[0, 1 , 2, 3], [1, 5, 6, 2], [0, 1, 5, 4], [3, 2, 6, 7], [0, 3, 7, 4], [4, 5, 6, 7]]) #verts = np.array([[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 0.0, 1.0], [0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [1.0, 1.0, 0.0], [1.0, 1.0, 1.0], [0.0, 1.0, 1.0]]) quads = [None] * faces.shape[0] listOfVertices = [] for i in range(len(verts)): listOfVertices.append(Vertex(i, verts[i])) for i in range(faces.shape[0]): face_vertices = [ listOfVertices[faces[i].astype(int)[j]] for j in range(len(faces[i])) ] quads[i] = Quad(i, face_vertices) for vertex in face_vertices: vertex.addNeighbouringFace(quads[i]) #neighbours_test = quads[4].find_neighbors(quads) [vertices_refined, faces_refined] = DooSabin(listOfVertices, quads, 0.5, 1) [vertices_refined1, faces_refined1] = DooSabin(vertices_refined, faces_refined, 0.5, 2) fig = plt.figure() ax = Axes3D(fig) ax.set_aspect('equal') x = [] y = [] z = []
# OpenCV setup: three preview windows, camera capture, the interactive
# perspective quad, and the red-range threshold parameters.
cv.namedWindow('input', cv.WINDOW_GUI_NORMAL | cv.WINDOW_AUTOSIZE)
cv.moveWindow('input', 100, 500)
cv.namedWindow('output', cv.WINDOW_GUI_NORMAL | cv.WINDOW_AUTOSIZE)
cv.moveWindow('output', 750, 500)
cv.namedWindow('filtered', cv.WINDOW_GUI_NORMAL | cv.WINDOW_AUTOSIZE)
cv.moveWindow('filtered', 1250, 500)
capture = cv.VideoCapture(4)  # camera index 4 -- machine-specific
capture.set(cv.CAP_PROP_AUTO_WB, False)  # lock white balance for stable HSV thresholds

def updateStorage():
    # Persist the quad's current corner points (called by Quad on change).
    store.put('quad', quad.getPoints())

# interactive quad; mouse handling attaches to 'input' only in debug mode
quad = Quad(store.get('quad'), MouseHandler(debug and 'input'), updateStorage)
outputRes = 480
outputSize = (outputRes, outputRes)
squarePoints = ((0, 0), (outputRes, 0), (0, outputRes), (outputRes, outputRes))
# HSV bounds for the red marker (NOTE(review): 256 exceeds the usual 255
# max for 8-bit HSV -- presumably intentional inclusive bound; confirm)
lowerRed = np.array([2, 200, 140])
upperRed = np.array([15, 256, 220])
#lowerRed = np.array([100, 60, 20])
#upperRed = np.array([130, 180, 250])
kernel = np.ones((10, 10), np.uint8)  # morphology kernel for the filtered mask

def printColor(x, y):
    # Debug helper: dump the HSV value at (x, y) of the module-level `hsv`
    # frame (defined elsewhere in this file).
    print(hsv[y][x])
def main():
    # Data-driven quadcopter stabilization: learn a Koopman operator online
    # from simulated transitions, and combine a finite-horizon LQR on the
    # lifted state with an adjoint-based correction to compute each control.
    quad = Quad()  ### instantiate a quadcopter
    ### the timing parameters for the quad are used
    ### to construct the koopman operator and the adjoint dif-eq
    koopman_operator = KoopmanOperator(quad.time_step)
    adjoint = Adjoint(quad.time_step)
    task = Task()  ### this creates the task
    simulation_time = 1000
    horizon = 20  ### time horizon
    sat_val = 6.0  ### saturation value
    control_reg = np.diag([1.] * 4)  ### control regularization
    inv_control_reg = np.linalg.inv(control_reg)  ### precompute this
    default_action = lambda x: np.random.uniform(-0.1, 0.1, size=(4,))  ### in case lqr control returns NAN
    ### initialize the state
    #_R = np.diag([1.0, 1.0, 1.0])
    _R = euler2mat(np.random.uniform(-1., 1., size=(3,)))  # random initial attitude
    _p = np.array([0., 0., 0.])
    _g = RpToTrans(_R, _p).ravel()  # flattened SE(3) pose
    _twist = np.random.uniform(-1., 1., size=(6,))  #* 2.0
    state = np.r_[_g, _twist]
    target_orientation = np.array([0., 0., -9.81])
    err = np.zeros(simulation_time)
    for t in range(simulation_time):
        #### measure state and transform through koopman observables
        m_state = get_measurement(state)
        t_state = koopman_operator.transform_state(m_state)
        # error: orientation deviation plus velocity magnitude
        err[t] = np.linalg.norm(m_state[:3] - target_orientation) + np.linalg.norm(m_state[3:])
        Kx, Ku = koopman_operator.get_linearization()  ### grab the linear matrices
        lqr_policy = FiniteHorizonLQR(Kx, Ku, task.Q, task.R, task.Qf, horizon=horizon)  # instantiate a lqr controller
        lqr_policy.set_target_state(task.target_expanded_state)  ## set target state to koopman observable state
        lqr_policy.sat_val = sat_val  ### set the saturation value
        ### forward sim the koopman dynamical system (here fdx, fdu is just Kx, Ku in a list)
        trajectory, fdx, fdu, action_schedule = koopman_operator.simulate(t_state, horizon, policy=lqr_policy)
        ldx, ldu = task.get_linearization_from_trajectory(trajectory, action_schedule)
        mudx = lqr_policy.get_linearization_from_trajectory(trajectory)
        rhof = task.mdx(trajectory[-1])  ### get terminal condition for adjoint
        rho = adjoint.simulate_adjoint(rhof, ldx, ldu, fdx, fdu, mudx, horizon)
        # adjoint correction added on top of the LQR feedback action
        ustar = -np.dot(inv_control_reg, fdu[0].T.dot(rho[0])) + lqr_policy(t_state)
        ustar = np.clip(ustar, -sat_val, sat_val)  ### saturate control
        if np.isnan(ustar).any():
            ustar = default_action(None)  # fall back when LQR diverges
        ### advacne quad subject to ustar
        next_state = quad.step(state, ustar)
        ### update the koopman operator from data
        koopman_operator.compute_operator_from_data(get_measurement(state), ustar, get_measurement(next_state))
        state = next_state  ### backend : update the simulator state
        ### we can also use a decaying weight on inf gain
        task.inf_weight = 100.0 * (0.99**(t))
        if t % 100 == 0:
            print('time : {}, pose : {}, {}'.format(t * quad.time_step, get_measurement(state), ustar))
    # plot tracking error over simulated time
    t = [i * quad.time_step for i in range(simulation_time)]
    plt.plot(t, err)
    plt.xlabel('t')
    plt.ylabel('tracking error')
    plt.show()