def uploadMotion(self, name, meta):
    """Validate and store an uploaded motion on this object's specification.

    Args:
        name: candidate motion name, used only for the duplicate check.
        meta: JSON string; must decode to a dict containing at least the
            keys 'name', 'fps' and 'keyframes'.

    Returns:
        Status string: 'exists', 'missing-name', 'missing-fps',
        'missing-keyframes', or 'uploaded' on success.
    """
    # Reject a duplicate by display name among the motions already stored.
    if name in [x.jsonData['name'] for x in self.specification.motions.values()]:
        return 'exists'
    meta = json.loads(meta)
    # Report the first required key that is absent, matching the original
    # per-key status strings.
    for key in ('name', 'fps', 'keyframes'):
        if key not in meta:
            return 'missing-' + key
    mo = Motion()
    mo.jsonData = meta
    mo.save()
    # Index the persisted motion by its database index on the specification.
    self.specification.motions[mo.jbIndex] = mo
    self.specification.save()
    return 'uploaded'
def uploadMotion(self, name, meta):
    """Receives motion data, validates it, and stores it on the specification.

    Args:
        name: candidate motion name, used only for the duplicate check.
        meta: JSON string; must decode to a dict containing at least the
            keys 'name', 'fps' and 'keyframes'.

    Returns:
        Status string: 'exists', 'missing-name', 'missing-fps',
        'missing-keyframes', or 'uploaded' on success.
    """
    # Reject a duplicate by display name among the motions already stored.
    if name in [x.jsonData['name'] for x in self.specification.motions.values()]:
        return 'exists'
    meta = json.loads(meta)
    # Report the first required key that is absent, matching the original
    # per-key status strings.
    for key in ('name', 'fps', 'keyframes'):
        if key not in meta:
            return 'missing-' + key
    mo = Motion()
    mo.jsonData = meta
    mo.save()
    # Index the persisted motion by its database index on the specification.
    self.specification.motions[mo.jbIndex] = mo
    self.specification.save()
    return 'uploaded'
def createStormTrack(self, points, stormTrackStart, hazardStart, hazardEnd, motion, footprint, returnTime1=False):
    """Create a point track from observed storm points.

    Args:
        points: list of ((lat, lon), time) pairs. An original note said time
            is in milliseconds past 1970; values are passed through to
            PointTrack unconverted -- TODO confirm the units PointTrack expects.
        stormTrackStart: start time of the track (not used in this body).
        hazardStart: used as the track origin time (drtTime).
        hazardEnd: end time of the hazard (not used in this body).
        motion: (speed, bearing) tuple, or 0 to use the default of
            20 kts at 225 degrees.
        footprint: NOTE(review) -- unused here; purpose unclear from this code.
        returnTime1: when True, also return the first point's time.

    Returns:
        PointTrack, or (PointTrack, firstTime) when returnTime1 is True.
        With more than three points the track is returned uninitialized.
    """
    # Unpack the input points into parallel coordinate / time lists.
    latLonList = []
    timeList = []
    for (lat, lon), t in points:
        latLonList.append(LatLonCoord(lat, lon))
        # NOTE(review): an old comment claimed "convert from ms to seconds",
        # but no conversion is performed; times are stored as received.
        timeList.append(t)
    drtTime = hazardStart
    # Initialize
    pt = PointTrack()
    if motion == 0:
        speed = 20  # kts
        bearing = 225  # degrees
    else:
        speed, bearing = motion
    motion = Motion(speed, bearing)
    # Choose the PointTrack initializer that matches the point count.
    if len(points) == 1:
        pt._PT_latLon_motion_origTime(latLonList[0], timeList[0], motion, drtTime)
    elif len(points) == 2:
        pt._PT_two_latLon_origTime(latLonList[0], timeList[0], latLonList[1], timeList[1], drtTime)
    elif len(points) == 3:
        pt._PT_three_latLon_origTime(latLonList[0], timeList[0], latLonList[1], timeList[1], latLonList[2], timeList[2], drtTime)
    if returnTime1:
        return pt, timeList[0]
    else:
        return pt
def create_player(world, camera, ic_x, ic_y, ic_sprint, ic_dash):
    """Assemble and register the player entity in *world*.

    Builds the transform, rendering, input-control, bubble, hitbox and
    collision components and hands them all to world.create_entity.
    ic_x / ic_y / ic_sprint / ic_dash are input-control handles wired to
    movement, sprint and dash components respectively.
    Returns whatever world.create_entity returns (presumably the new
    entity -- confirm against the world implementation).
    """
    # Shared data
    spr_head = Sprite("head")
    # Components
    transform = Transform()
    # NOTE(review): both boost and dash controls receive ic_dash while
    # ic_sprint goes to the movement control below -- confirm BoostControl
    # is not meant to take ic_sprint.
    bcontrol = BoostControl(ic_dash)
    dcontrol = DashControl(ic_dash)
    rend = Renderable()
    head = rend.add_sprite(spr_head)
    rend.camera = camera
    # Movement: map the two input axes onto unit x / y vectors.
    mcontrol = MovementControl()
    mcontrol.add_dir_control(ic_x, Vector(1, 0))
    mcontrol.add_dir_control(ic_y, Vector(0, 1))
    mcontrol.ic_sprint = ic_sprint
    bubble = Bubble()
    bubble.camera = camera
    # On-screen bubble mirrors the head sprite's scale and opacity.
    bubble.add_bubble_sprite(BubbleSprite("head", 2, sbo=64 + 63, scale_func = lambda x,y: spr_head.transform.scale, opacity_func = lambda x,y: spr_head.opacity))
    # Off-screen indicator: fixed 0.8 opacity, rotated to point at the
    # player's off-screen position.
    bubble.add_bubble_sprite(BubbleSprite("offscreen_bubble", 1, sbo=112.5, scale_func = lambda x,y: spr_head.transform.scale, opacity_func = lambda x,y: 0.8, rot_func = lambda bub_spr,_: mathutils.DEG_180-Vector(bub_spr.offscreen_x, bub_spr.offscreen_y).to_rot()+mathutils.DEG_90))
    bubble.roo = 64
    # Hurtbox and collider share the same circle, positioned relative to
    # the live transform via the rel_pos closures.
    attack_boxes = AttackBoxes()
    attack_boxes.hurtboxes.append(Hurtbox(CircleCollider(Vector(0, 0), 64, rel_pos=lambda pos: transform.pos.added_to(pos))))
    collidable = Collidable([CircleCollider(Vector(0, 0), 64, rel_pos=lambda pos: transform.pos.added_to(pos))])
    collidable.bounce = 0.1
    # NOTE(review): Trail() is passed twice to create_entity -- confirm
    # whether two trail components are intentional.
    return world.create_entity(transform, rend, mcontrol, bcontrol, dcontrol, bubble, attack_boxes, collidable, Trail(), PlayerTag(), Motion(), Trail(), Particles(), Damage(), Stunnable())
# Main pygame loop: redraw the scene, record key input per ship, then run
# combat interactions. Indentation reconstructed from a collapsed source
# line -- nesting is a best-effort guess; confirm against the original file.
while True:
    # Clear the frame and draw the sea rectangle.
    DISPLAYSURF.fill(black)
    pygame.draw.rect(DISPLAYSURF, sea, (250, 250, 1000, 1000))
    for ship in test:
        ship.clear()
    # Route events: quit, or record key up/down for every ship.
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        elif event.type == KEYDOWN or event.type == KEYUP:
            for ship in test:
                Motion.motionRecord(ship, event)
    for ship in test:
        Motion.motionDisplay(DISPLAYSURF, ship)
    # Two fixed HUD positions -- assumes exactly two players in `test`.
    test[0].showSituation(DISPLAYSURF, 0, 250)
    test[1].showSituation(DISPLAYSURF, 1300, 250)
    # Each ship may fire cannons at every ship of the other player.
    for ship in test:
        for shipE in test:
            if ship.player != shipE.player:
                Motion.cannonFire(ship, shipE)
    for ship in test:
        Motion.torpedoFire(ship, torlist)
    # NOTE(review): fragment is truncated here; the torpedo-update loop
    # body is not visible in this chunk.
    for torpedoOb in torlist:
# EV3 balance/drive script (Python 2 -- print statements). Reads a gyro
# angle, feeds it through a PID controller and drives the motors with the
# clamped output. Reconstructed from a collapsed source line.
from ev3dev.ev3 import *
import os, time
import pid
import sys
import Motion
# os.system('espeak -s 120 -v zh \'woyaoshiyong\' --stdout|aplay')
# os.system('espeak -s 120 -v en-us \'p i d\' --stdout|aplay')
# os.system('espeak -s 120 -v zh \'suanfa\' --stdout|aplay')
# pid= pid.PID(P=sys.argv[1],I=sys.argv[2],D=sys.argv[3])
# Fixed gains (the commented line above shows they were once CLI args).
pid = pid.PID(P=0.2, I=0.2, D=0.3)
motion = Motion.motion()
while True:
    # time.sleep(1)
    angle = motion.getGyro()
    pid.update(angle)
    # Scale the controller output to the motor command range.
    out = int(pid.output * 10)
    print "\n"
    print angle
    print out
    print motion.getDist()
    # Clamp to the motor limits of +/-1000.
    if out >= 1000:
        out = 1000
    elif out <= -1000:
        out = -1000
    # Steer according to the sign of the gyro angle.
    # NOTE(review): fragment truncated -- the else branch body is missing.
    if angle >= 0:
        motion.motion_left(-out)
    else:
# Startup fragment (truncated at the start -- `suppress=True)` closes a
# call begun outside this chunk, presumably numpy print options).
suppress=True)
# Format output on terminal
print("INFO: Debug messages enabled.")
print("INFO: Connecting to vehicle.")
# Block until the vehicle connection succeeds.
while (not pixhawkObj.vehicle_connect(connection_string, connection_baudrate)):
    pass
print("INFO: Vehicle connected.")
if pixhawkObj.is_vehicle_connected:
    pixhawkObj.setmode()
# Camera, position estimator, mapper and motion controller objects.
d435Obj = d435.rs_d435(framerate=30, width=480, height=270)
posObj = Position.position(pixhawkObj)
mapObj = map.mapper()
motionObj = Motion.motion(pixhawkObj)
# Schedules Mavlink Messages in the Background at predetermined frequencies
sched = BackgroundScheduler()
if pixhawkObj.enable_msg_vision_position_estimate:
    sched.add_job(posObj.loop, 'interval', seconds=1 / vision_position_estimate_msg_hz)
if pixhawkObj.enable_update_tracking_confidence_to_gcs:
    sched.add_job(posObj.conf_loop, 'interval', seconds=1 / pixhawkObj.update_tracking_confidence_to_gcs_hz_default)
# Monte Carlo Localization demo setup: window, robot parameters, and the
# vision/motion sensors with their noise and observation models.
map_height = win_height
robot_width = 20
robot_height = 20
# Start the robot in the map center.
robot_pos_x = map_width/2
robot_pos_y = map_height/2
robot_image = "gui/rocket.png"
win = MainWindow(win_width, win_height, "Monte Carlo Localization - Demo application")
win.FPS = 30
win.set_option("seed", map_seed)
# Vision sensor window size (width, height) in map cells -- TODO confirm units.
vs = (15,15)
# NOTE(review): these two sensors appear superseded by vision_sensor /
# motion_sensor created below -- confirm whether they are still used.
vision = Vision.Sensor(vs)
motion = Motion.Sensor()
# OPTIONS for noise: GAUSSIAN, SALT, PEPPER, SALT_PEPPER, SPECKLE
# OPTIONS for model: MDIFF, SQDIFF, CCORR, CCOEFF
vision_sensor = Vision.Sensor(vs, fps=5)
vision_sensor_noise = Vision.SensorNoiseModel.GAUSSIAN(0.5)
vision_sensor.set_noise_model(vision_sensor_noise)
vision_model = Vision.ObservationModel.MDIFF
# OPTIONS for noise: GAUSSIAN, ADVANCED
# OPTIONS for model: GAUSSIAN, ADVANCED
motion_sensor = Motion.Sensor(fps = 15)
motion_sensor_noise = Motion.SensorNoiseModel.ADVANCED( (0.3,0.5) )
motion_sensor.set_noise_model(motion_sensor_noise)
motion_model = Motion.ObservationModel.ADVANCED
"nico_humanoid_upper.json")) # parser.add_argument('--subset', nargs='?', default=None, # help="joint subset file") parser.add_argument('--speed', nargs='?', default="0.05", help="speed of movement") parser.add_argument('--vrep', action="store_true", default=False, help="let it run on vrep than instead of real robot") parser.add_argument('--stiffoff', action="store_true", default=False, help="sets the stiffness to off after movement") args = parser.parse_args() # print args connected = False while (not connected): try: robot = Motion.Motion(args.json, vrep=args.vrep) connected = True except Exception, e: print('\nFailed to connect to robot: ' + str(e)) print "\n Not connected. Let me try it again." time.sleep(1) freezer = Freezer(robot, stiff_off=args.stiffoff) # robot.enableTorqueAll() while(True): print "\b Give command (fla,fra,fh,lla,lra,lh,or,ol,cl,cr):" command = raw_input() # print command + " " + subsets[command[1:]] + "\n" if command[:1] == "f":
#! python # -*- coding: utf-8 -*- from Motion import * from Manuell import * from Kompass import * from time import sleep from SMBUSBatt import * from Scanner3d import * if __name__ == "__main__": man = Manuell() motion = Motion() kompass = Kompass() batterie = SMBUSBatt() scanner = Scanner() start = time.time() scanner.init_3D_scan( min_pitch=10, max_pitch=15, min_heading=-15.0, max_heading=15.0, ) ThreadEncoder = Thread(target=man.runManuell, args=()) ThreadEncoder.daemon = True ThreadEncoder.start()
# Rover setup script: instantiate all subsystems (manual control, motion,
# battery, map, odometry, path planning, scanning, avoidance, transmission,
# planner), start the manual-control thread, and set the first goal pose.
from Manuell import *
from Karte import *
from Motion import *
from Weggeber import *
from SMBUSBatt import *
from Pathplaning import *
from Scanner import *
from math import *
import pickle
from Avoid import *
from Transmit import *
from Planer import *

manuell = Manuell()
motion = Motion()
battery = SMBUSBatt()
karte = Karte()
weggeber = Weggeber()
pathplaning = Pathplaning()
scanner = Scanner()
avoid = Avoid()
transmit = Transmit()
# NOTE(review): `Pumper` has no visible import -- presumably provided by one
# of the wildcard imports above; confirm.
pumper = Pumper()
# Planner needs the avoidance module and the map.
planer = Planer(avoid, karte)
# Manual control runs on a daemon thread so it dies with the main program.
# NOTE(review): `Thread` is presumably exposed by a wildcard import above.
ThreadEncoder = Thread(target=manuell.runManuell, args=())
ThreadEncoder.daemon = True
ThreadEncoder.start()
# Initial goal pose: heading in degrees, x position -- TODO confirm units.
theta_goal = 180
x_goal = 0
def __init__(self):
    """Wire up the ROV control stack: TCP communicator, PWM hat with all
    thrusters/servos, lift-bag communicator, motion component and the
    publisher that routes events between them.

    Fragment is truncated -- it ends at the start of the listener-binding
    loop; the loop body continues in the next chunk.
    """
    # initialize tcp_communicator for communicating with QT via TCP
    # pi ip address (comment for testing)
    ip = "10.0.1.55"
    # local ip address (uncomment for testing)
    # ip = "0.0.0.0"
    tcpPort = 9005
    # streaming enable or disable
    streamingEnable = True
    # streaming attributes
    streamingIP = "10.0.1.54"
    streamingPort1 = "1234"
    streamingPort2 = "5678"
    streamingPort3 = "4321"
    streamingPort4 = "8765"
    # initialize tcp communicator
    self.tcp_communicator = TcpCommunicator(ip, tcpPort, streamingIP, streamingPort1, streamingPort2, streamingPort3, streamingPort4, streamingEnable, bind=True)
    # initialize hat with default address and frequency (comment when testing)
    hat_address = 0x40
    frequency = 50
    self.hat = Hat(hat_address, frequency)
    # self.pressureSensor = Sensor()
    # Lift-bag communicator starts with all axes zeroed.
    self.liftBagCommunicator = Lift_Bag_Communicator({
        "x": 0, "y": 0, "z": 0, "r": 0, "up": 0, "down": 0, "l": 0, "bag": 0
    })
    # initialize dummy hat for testing without the pi
    # self.hat = Dummy_Hat()
    # Neutral PWM values -- TODO confirm these match the ESC/servo specs.
    thruster_base_pwm = 305
    camera_base_pwm = 400
    # adding devices to hat -- args: (device name, channel, base pwm)
    self.hat.addDevice("top_rear_thruster", 9, thruster_base_pwm)
    self.hat.addDevice("top_front_thruster", 7, thruster_base_pwm)
    self.hat.addDevice("left_rear_thruster", 3, thruster_base_pwm)
    self.hat.addDevice("right_rear_thruster", 5, thruster_base_pwm)
    self.hat.addDevice("left_front_thruster", 11, thruster_base_pwm)
    self.hat.addDevice("right_front_thruster", 0, thruster_base_pwm)
    self.hat.addDevice("camera_servo", 15, camera_base_pwm)
    self.hat.addDevice("light", 13, 0)
    # list of system components
    components = []
    # motion equation component -- args (hat, angle zeros)
    self.motion = Motion(self.hat, {
        "x": 0, "y": 0, "z": 0, "r": 0, "up": 0, "down": 0, "l": 0, "bag": 0
    })
    components.append(self.liftBagCommunicator)
    components.append(self.motion)
    # publisher
    self.publisher = Publisher()
    # bind component listeners to TCP and I2C events in publisher
    # NOTE(review): fragment truncated -- loop body continues elsewhere.
    for component in components:
# Continuation of an __init__ body (fragment starts mid-method).
# NOTE(review): the first two registerEventListener lines are presumably the
# body of a `for component in components:` loop begun in the previous chunk.
self.publisher.registerEventListener("TCP", component.update)
self.publisher.registerEventListener("TCP ERROR", component.update)
# Clock ticks drive both the motion equations and the hat PWM updates.
self.publisher.registerEventListener("CLOCK", self.motion.update)
self.publisher.registerEventListener("CLOCK", self.hat.update)
self.publisher.registerEventListener("HAT", self.hat.update)
# Sensor readings and lift-bag data flow back out over TCP.
self.publisher.registerEventListener(
    "SENSOR", self.tcp_communicator.sendFeedback)
self.publisher.registerEventListener(
    "BAG", self.tcp_communicator.sendToLiftBag)
# Each producer triggers events through the shared publisher.
self.tcp_communicator.registerCallBack(self.publisher.trigger_event)
self.motion.registerCallBack(self.publisher.trigger_event)
self.liftBagCommunicator.registerCallBack(self.publisher.trigger_event)
# self.pressureSensor.registerCallBack(self.publisher.trigger_event)
# create interrupter and bind to I2C event trigger callback
# (when commented, pwms are only updated in the hat on change)
# self.interrupter = Interrupter(self.publisher.trigger_event, "SENSOR")
self.interrupter = Dummy_Interrupter(self.publisher.trigger_event, "SENSOR", 'dummy;data')
# Main loop
self.tcp_communicator.mainLoop()
# Socket client: receives comma-separated control frames, parses joystick
# axes and error flags, runs them through the pre-processor and drives the
# motion module. Fragment ends at a dangling `except KeyboardInterrupt:`.
import socket
import Motion_pre as mo_pre
import Motion as mo

s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
port = 12346
s.connect(('192.168.43.163', port))
count = 0
comm = mo_pre.Motion_pre()
motive_motion = mo.Movement()
while (True):
    try:
        h = s.recv(1024).decode('utf-8').split(',')
        # Skip the first 5 frames (warm-up), then process every frame.
        if count != 5:
            count += 1
        else:
            print(" Debug ", h[2:4], h[9:11])
            print(" Debug ", h)
            # Slicing [2:-1] strips surrounding quote/bracket characters
            # from each field -- TODO confirm the wire format.
            left_right = int(h[2][2:-1])
            forward = int(h[3][2:-1])
            err_on = h[9][2:-1]
            err_off = h[10][2:-1]
            print(" Input :", left_right, forward, err_on, err_off)
            output = comm.input(forward, left_right, err_on, err_off)
            print(" Intermidiate Output :", output)
            motive_motion.move(output)  #the code fro final output
    except ValueError:
        # Malformed frame: log and keep listening.
        print(" Will still continue")
    # NOTE(review): fragment truncated -- the KeyboardInterrupt body is missing.
    except KeyboardInterrupt:
def __init__(self):
    """Assemble the ROV stack: TCP and I2C communicators, two AVR boards
    with their devices, the manipulator/lights/camera/motion components,
    and the PostOffice event router.

    Fragment is truncated -- it ends inside the final registerEventListner
    call, which continues in the next chunk.
    """
    ip = "0.0.0.0"
    port = 8005
    self._topcommunicator = TcpCommunicator(ip, port, bind=True)
    self._botcommunicator = I2cCommunicator()
    self._myhardware = Hardware(self._botcommunicator)
    #===============AVR
    self._Avr1address = 6
    self._Avr2address = 7
    self._myhardware.addAVR(self._Avr1address)
    self._myhardware.addAVR(self._Avr2address)
    #===============Devices
    # Base PWM / values -- TODO confirm against the ESC and servo specs.
    motorsbasepwm = 1440
    zero = 0
    cameraservobase = 1500
    RGBwhite = 7
    # AVR 0 carries the six thrusters and the DC device.
    self._myhardware._avrList[0].addDevice("Front_Right_Thruster", motorsbasepwm, 2)
    self._myhardware._avrList[0].addDevice("Front_Left_Thruster", motorsbasepwm, 2)
    self._myhardware._avrList[0].addDevice("Back_Right_Thruster", motorsbasepwm, 2)
    self._myhardware._avrList[0].addDevice("Back_Left_Thruster", motorsbasepwm, 2)
    self._myhardware._avrList[0].addDevice("Up_Front_Thruster", motorsbasepwm, 2)
    self._myhardware._avrList[0].addDevice("Up_Back_Thruster", motorsbasepwm, 2)
    self._myhardware._avrList[0].addDevice("DC", zero, 1)
    # AVR 1 carries the camera servo, LED and cycle flag.
    self._myhardware._avrList[1].addDevice("Camera_Servo", cameraservobase, 2)
    self._myhardware._avrList[1].addDevice("LED", RGBwhite, 1)
    self._myhardware._avrList[1].addDevice("Cycle_Flag", zero, 1)
    #=============Components
    #identifiers must be in the form of a list
    self._rovmanipulator = Manipulator(self._myhardware, {"grip": 0})
    self._rovLights = Lights(self._myhardware, {"led": 7})
    self._rovCamera = Camera(self._myhardware, {"cam": 0})
    self._rovmotion = Motion(self._myhardware, {
        "x": 0, "y": 0, "z": 0, "r": 0, "currentmode": "normal"
    })
    modules = [
        self._rovmanipulator, self._rovLights, self._rovCamera, self._rovmotion
    ]
    #=============PostOffcie
    # Every module listens for TCP traffic and TCP errors.
    # (The spelling "registerEventListner" matches the PostOffice API.)
    self.mypostoffice = PostOffice()
    for module in modules:
        self.mypostoffice.registerEventListner("TCP", module.mail)
        self.mypostoffice.registerEventListner("TCP ERROR", module.mail)
    self.mypostoffice.registerEventListner("I2C", self._rovmanipulator.mail)
    self.mypostoffice.registerEventListner(
        "I2C", self._myhardware._avrList[1].mail)
    # NOTE(review): fragment truncated -- this call's arguments continue
    # in the next chunk.
    self.mypostoffice.registerEventListner(
"I2C", self._myhardware._avrList[0].mail) self.mypostoffice.registerEventListner("Sensors", self._topcommunicator._send) self._topcommunicator.registerCallBack(self.mypostoffice.triggerEvent) #=============Sensors # self._mysensors=SensorRegistry() # self._mysensors.registerSensor(sensor) # self._mysensors.registerCallBack(self.mypostoffice.triggerEvent) #=====intterupts self._interruptor = Interrupt() self._interruptor.register(23, False, self.mypostoffice.triggerEvent, "I2C") self._topcommunicator._mainLoop()
def main():
    """Publish RGB and depth frames from the simulated TRINA cameras as ROS
    Image topics under the "images/" prefix, at up to 30 Hz.
    """
    # Command-line configuration.
    arg_parser = argparse.ArgumentParser(description="Get imgs")
    arg_parser.add_argument(
        '-w',
        type=str,
        default="/home/motion/TRINA/Motion/data/TRINA_world_cholera.xml",
        help="world file")
    arg_parser.add_argument('-n', type=str, default="cholera", help="codename")
    arg_parser.add_argument(
        '-c',
        nargs='*',
        default=['zed_slam_left', 'zed_slam_right', 'realsense_slam_l515'],
        help="cameras to use")
    arg_parser.add_argument('--components',
                            nargs='*',
                            default=['left_limb', 'right_limb', 'base'],
                            help="components to use")
    arg_parser.add_argument('-m',
                            type=str,
                            default='Kinematic',
                            help='mode to operate in')
    args = arg_parser.parse_args()

    # Bring up the motion server and load the simulation world.
    motion_client = Motion.MotionClient()
    motion_client.restartServer(mode=args.m,
                                components=args.components,
                                codename=args.n)
    sim_world = klampt.WorldModel()
    sim_world.loadFile(args.w)
    sensor_module = Camera_Robot(motion_client, world=sim_world, cameras=args.c)
    time.sleep(1)

    # One RGB topic and one depth topic per camera.
    camera_names = ["realsense_slam_l515", "zed_slam_left", "zed_slam_right"]
    rgb_suf = "_rgb"
    d_suf = "_d"
    topic_names = [
        cam + suffix for cam in camera_names for suffix in (rgb_suf, d_suf)
    ]

    rate = rospy.Rate(30)
    topic_pref = "images/"
    pubs = {
        topic_pref + topic: rospy.Publisher(
            topic_pref + topic, Image, queue_size=10)
        for topic in topic_names
    }
    #rospy.init_node("sim_pipe", anonymous=True)
    bridge = CvBridge()
    max_d = 6.0
    min_d = 0.0
    while not rospy.is_shutdown():
        img_dict = sensor_module.get_rgbd_images()
        for cam in camera_names:
            frames = img_dict[cam]
            if len(frames) > 0:
                color = frames[0]
                # Normalize depth to be 0 <= d < 1
                scaled = (frames[1] - min_d) / (max_d - min_d)
                # Fit in maximum range of 16 (unsigned) bits
                scaled *= (2**16 - 1)
                depth_msg = bridge.cv2_to_imgmsg(scaled.astype("uint16"),
                                                 "16UC1")
                pubs[topic_pref + cam + rgb_suf].publish(
                    bridge.cv2_to_imgmsg(color, "8UC3"))
                pubs[topic_pref + cam + d_suf].publish(depth_msg)
        rate.sleep()
### ====================================================================================================== ### Streaming On Each Frame -- Part 3 ### ====================================================================================================== ## ================ Video Capture Initialize ============= ## cap = cv2.VideoCapture('../video1.mp4') # cap = cv2.VideoCapture(0) ## ===================== Start Streaming ================= ## position = np.identity(4) initialPoint = np.array([0, 0, 0]) finalPoint = np.array([0, 500, 0]) step = Motion.getMotionStep(initialPoint, finalPoint, 5) alpha = 0.25 count = 0 area = math.inf h, w = model.shape pts = np.float32([[0, 0], [0, h - 1], [w - 1, h - 1], [w - 1, 0]]).reshape(-1, 1, 2) print(pts) def get_smoothened_homo(H_old, H_new, alpha): return H_old * (1 - alpha) + H_new * (alpha)