def __init__(self, timesteps=32, gamma=0.99, epsilon=1.0, epsilon_min=0.01,
             epsilon_log_decay=0.99, alpha=0.01):
    """Set up the Webots supervisor, devices, replay memory and DQN hyper-parameters.

    Args:
        timesteps: control/sensor sampling period in ms.
        gamma: discount factor.
        epsilon: initial exploration rate.
        epsilon_min: lower bound for the exploration rate.
        epsilon_log_decay: decay factor applied to epsilon.
        alpha: learning rate for the optimizer.

    Exits the process if the world has no robot tagged ``DEF MY_BOT``.
    """
    self.supervisor = Supervisor()
    # The controlled robot must be tagged `DEF MY_BOT` in the world file.
    self.robot_node = self.supervisor.getFromDef("MY_BOT")
    if self.robot_node is None:
        # BUGFIX: the message previously said "MY_ROBOT" although the
        # lookup above uses "MY_BOT" -- message now matches the code.
        sys.stderr.write(
            "No DEF MY_BOT node found in the current world file\n")
        sys.exit(1)
    self.trans_field = self.robot_node.getField("translation")
    self.rot_field = self.robot_node.getField("rotation")
    # BUGFIX: this was assigned twice in the original; duplicate removed.
    self.timestep = timesteps
    self.camera = Camera('camera')
    self.camera.enable(self.timestep)
    self.init_image = self.get_image()
    self.receiver = Receiver('receiver')
    self.receiver.enable(self.timestep)
    self.emitter = Emitter('emitter')
    # Experience-replay buffer and DQN hyper-parameters.
    self.memory = deque(maxlen=50000)
    self.batch_size = 128
    self.alpha = alpha
    self.gamma = gamma
    # NOTE: keeps the historical misspelling "epsion_init" because
    # init_parametter() elsewhere in the file reads self.epsion_init.
    self.epsion_init = epsilon
    self.epsilon_min = epsilon_min
    self.epsilon_decay = epsilon_log_decay
    # Previous-step bookkeeping used by the learning loop.
    self.pre_state = self.init_image
    self.pre_action = -1
    self.pre_go_straight = False
    self.reward = 0
    self.step = 0
    self.max_step = 200
    self.file = None
    # Interactive (human-feedback) settings.
    self.feedbackProbability = 0
    self.feedbackAccuracy = 1
    self.PPR = False
    self.feedbackTotal = 0
    self.feedbackAmount = 0
    self.init_model()
    self.init_parametter()
def initialization(self):
    # Switch into obstacle-avoidance mode and set up the on-board camera.
    self.mode = self.Mode.AVOIDOBSTACLES
    self.camera = self.getCamera('camera')
    # Camera sampled at 4x the basic control timestep (slower than the loop).
    self.camera.enable(4 * self.timeStep)
    width = Camera.getWidth(self.camera)
    height = Camera.getHeight(self.camera)
    imagecameraki = Camera.getImage(self.camera)
    # Fractions of the frame, presumably region-of-interest bounds for the
    # pixel scan below -- TODO confirm against the complete source.
    i = width / 3
    j = height / 2
    k = height / 4
    # NOTE(review): the line below is C-style pseudo-code, not valid Python
    # ("l" is never initialised, and "for(cond):" is not Python syntax).
    # The chunk is truncated here; the loop body is missing from this view.
    # A fuller variant of this function elsewhere in the file scans the
    # camera's green channel at this point.
    for( l < 2 * i):
def setupDevice(self):
    """Look up every on-robot device and enable the sensors.

    Configures the wheel motors for velocity control (position = inf,
    speed 0), enables the distance sensors, GPS, touch sensors, camera
    and receiver at the controller timestep, and records the initial
    distance-sensor readings used later as the reset observation.
    """
    period = self.timestep
    # Wheel motors: infinite target position => pure velocity control.
    self.leftMotor = self.robot.getDevice('left wheel motor')
    self.rightMotor = self.robot.getDevice('right wheel motor')
    for wheel in (self.leftMotor, self.rightMotor):
        wheel.setPosition(float('inf'))
    # Distance sensors: ds0 on the left, ds1 on the right.
    self.rightDistanceSensor = self.robot.getDevice('ds1')
    self.leftDistanceSensor = self.robot.getDevice('ds0')
    self.rightDistanceSensor.enable(period)
    self.leftDistanceSensor.enable(period)
    # GPS plus five touch sensors (collision detection elsewhere).
    self.gps = self.robot.getDevice('gps')
    self.touchSensor1 = self.robot.getDevice('touch_sensor1')
    self.touchSensor2 = self.robot.getDevice('touch_sensor2')
    self.touchSensor3 = self.robot.getDevice('touch_sensor3')
    self.touchSensor4 = self.robot.getDevice('touch_sensor4')
    self.touchSensor5 = self.robot.getDevice('touch_sensor5')
    self.gps.enable(period)
    for bumper in (self.touchSensor1, self.touchSensor2, self.touchSensor3,
                   self.touchSensor4, self.touchSensor5):
        bumper.enable(period)
    # Camera is constructed directly by name rather than via getDevice.
    self.camera = Camera('camera')
    self.camera.enable(period)
    # Start from rest.
    self.leftMotor.setVelocity(0)
    self.rightMotor.setVelocity(0)
    # Baseline readings, taken after the sensors are enabled.
    self.init_leftValue = self.leftDistanceSensor.getValue()
    self.init_rightValue = self.rightDistanceSensor.getValue()
    # Supervisor communication channel.
    self.receiver = Receiver('receiver')
    self.emitter = Emitter('emitter')
    self.receiver.enable(period)
def initialization(self):
    """Enter obstacle-avoidance mode and initialise camera, receiver and motors.

    BUGFIX: the original contained C-style ``for( l < 2 * i):`` /
    ``for(m < 3 * k):`` pseudo-loops, which are not valid Python (the loop
    variables were never initialised). They are replaced with standard
    range-based raster loops over the same bounds, and the region bounds
    use integer division so ``range`` receives ints.
    """
    self.mode = self.Mode.AVOIDOBSTACLES
    self.camera = self.getCamera('camera')
    # Camera sampled at 4x the basic control timestep.
    self.camera.enable(4 * self.timeStep)
    width = Camera.getWidth(self.camera)
    height = Camera.getHeight(self.camera)
    imagecameraki = Camera.getImage(self.camera)
    # Region-of-interest bounds (integer so they can drive range()).
    i = width // 3
    j = height // 2
    k = height // 4
    # Scan the green channel over the left two-thirds of the width and the
    # top three-quarters of the height. NOTE(review): the original's
    # increment placement was ambiguous; a plain nested raster scan is the
    # evident intent -- confirm bounds against the original C source.
    for l in range(2 * i):
        for m in range(3 * k):
            green = Camera.imageGetGreen(imagecameraki, width, l, m)
    self.receiver = self.getReceiver('receiver')
    self.receiver.enable(self.timeStep)
    # Differential-drive motors in velocity mode (left = index 0).
    self.motors.append(self.getMotor("left wheel motor"))
    self.motors.append(self.getMotor("right wheel motor"))
    self.motors[0].setPosition(float("inf"))
    self.motors[1].setPosition(float("inf"))
    self.motors[0].setVelocity(0.0)
# --- Webots camera / range-finder -> ROS bridge (script; truncated below) ---
rospy.init_node('camera_test_node')
robot = Robot()
# NOTE(review): `global` at module level is a no-op; presumably left over
# from when this code lived inside a function.
global camPub
global rangePub
# Publishers for the RGB camera and the depth (range-finder) images.
camPub = rospy.Publisher('/camera/image', Image, queue_size=20)
rangePub = rospy.Publisher('/range_finder/image', Image, queue_size=20)
# get the time step of the current world.
timestep = int(robot.getBasicTimeStep())
# Sensor sampling period in ms (independent of the control timestep).
SAMPLE_TIME = 100
camera = robot.getDevice('camera')
depth = robot.getDevice('range-finder')
global rscam
global depthcam
# Devices are re-fetched via the Camera/RangeFinder wrappers by name.
rscam = Camera('camera')
depthcam = RangeFinder('range-finder')
depthcam.enable(SAMPLE_TIME)
rscam.enable(SAMPLE_TIME)
# camera_CB publishes images when a True message arrives on this topic.
rospy.Subscriber('publish_images', Bool, camera_CB)
# Publish simulation time on /clock so ROS nodes can follow sim time.
clockPublisher = rospy.Publisher('clock', Clock, queue_size=1)
if not rospy.get_param('use_sim_time', False):
    rospy.logwarn('use_sim_time is not set!')
# Main loop:
# - perform simulation steps until Webots is stopping the controller
while robot.step(timestep) != -1 and not rospy.is_shutdown():
    msg = Clock()
    time = robot.getTime()
    # NOTE(review): chunk truncated here -- the rest of the loop body
    # (nsecs computation and publishing) is outside the visible source.
    msg.clock.secs = int(time)
class RobotController:
    """Webots robot-side controller exposing a gym-like step/reset API.

    Receives actions from a supervisor over a Receiver, drives a
    differential-drive robot, and reports (observation, reward, done, info)
    tuples back over an Emitter.
    """

    def __init__(self, max_steps=2, init_position=(0, 0, 0),
                 final_position=(-0.3, 0, 0.3), max_speed=3, ):
        # max_steps: episode length cap (stored but not used in this view).
        # init_position / final_position: (x, y, z) world coordinates;
        # only indices 0 and 2 (the ground plane) are compared below.
        self.robot = Robot()
        self.timestep = int(self.robot.getBasicTimeStep())
        self.max_steps = max_steps
        self.max_speed = max_speed
        self.setupDevice()
        self.init_position = init_position
        self.current_position = init_position
        self.final_position = final_position
        self.done = False
        # Interactive
        self.feedbackAmount = 0
        # self.policy_reuse = PPR()

    def setupDevice(self):
        """Look up all devices, enable sensors, zero the wheel motors and
        record the initial distance readings used by reset()."""
        # Wheel motors in velocity mode (position = inf, speed set later).
        self.leftMotor = self.robot.getDevice('left wheel motor')
        self.rightMotor = self.robot.getDevice('right wheel motor')
        self.leftMotor.setPosition(float('inf'))
        self.rightMotor.setPosition(float('inf'))
        # Distance sensors: ds0 = left, ds1 = right.
        self.rightDistanceSensor = self.robot.getDevice('ds1')
        self.leftDistanceSensor = self.robot.getDevice('ds0')
        self.rightDistanceSensor.enable(self.timestep)
        self.leftDistanceSensor.enable(self.timestep)
        self.gps = self.robot.getDevice('gps')
        # Five touch sensors around the body, used for collision detection.
        self.touchSensor1 = self.robot.getDevice('touch_sensor1')
        self.touchSensor2 = self.robot.getDevice('touch_sensor2')
        self.touchSensor3 = self.robot.getDevice('touch_sensor3')
        self.touchSensor4 = self.robot.getDevice('touch_sensor4')
        self.touchSensor5 = self.robot.getDevice('touch_sensor5')
        self.gps.enable(self.timestep)
        self.touchSensor1.enable(self.timestep)
        self.touchSensor2.enable(self.timestep)
        self.touchSensor3.enable(self.timestep)
        self.touchSensor4.enable(self.timestep)
        self.touchSensor5.enable(self.timestep)
        self.camera = Camera('camera')
        self.camera.enable(self.timestep)
        self.leftMotor.setVelocity(0)
        self.rightMotor.setVelocity(0)
        # Baseline readings returned as the observation after reset().
        self.init_leftValue = self.leftDistanceSensor.getValue()
        self.init_rightValue = self.rightDistanceSensor.getValue()
        # Supervisor communication channel.
        self.receiver = Receiver('receiver')
        self.emitter = Emitter('emitter')
        self.receiver.enable(self.timestep)

    def is_collised(self):
        """True if any touch sensor fires or the robot left the +-0.5 arena.

        Side effect: refreshes self.current_position from the GPS.
        """
        if (self.touchSensor1.getValue() + self.touchSensor2.getValue() +
                self.touchSensor3.getValue() + self.touchSensor4.getValue() +
                self.touchSensor5.getValue() > 0):
            # Debug dump of which bumper(s) triggered.
            print(1, self.touchSensor1.getValue())
            print(2, self.touchSensor2.getValue())
            print(3, self.touchSensor3.getValue())
            print(4, self.touchSensor4.getValue())
            print(5, self.touchSensor5.getValue())
            return True
        gpsValue = self.gps.getValues()
        self.current_position = gpsValue
        # Out of the square arena [-0.5, 0.5] on the x and z axes.
        if (self.current_position[0] < - 0.5 or self.current_position[0] > 0.5
                or self.current_position[2] < - 0.5
                or self.current_position[2] > 0.5):
            return True
        return False

    def step(self, a):
        """Advance one control step with action a (0=left, 1=right, 2=straight).

        Returns (observation, reward, done, truncated):
        - normal step: ((left, right), -0.1 or 0 for straight, plus goal
          bonus from set_done(), done flag, False)
        - collision: (reset observation, -100, False, True)
        - already done: (None, None, done, False)
        """
        if not self.done:
            self.robot.step(self.timestep)
            if not self.is_collised():
                leftValue = self.leftDistanceSensor.getValue()
                rightValue = self.rightDistanceSensor.getValue()
                # Small living penalty; going straight is free to encourage it.
                reward = -0.1
                leftSpeed, rightSpeed = 0, 0
                if a == 0:
                    leftSpeed, rightSpeed = self.turnLeft(leftValue, rightValue)
                elif a == 1:
                    leftSpeed, rightSpeed = self.turnRight(leftValue, rightValue)
                elif a == 2:
                    leftSpeed, rightSpeed = self.goStraight()
                    reward = 0
                # elif a == 3:
                #     leftSpeed, rightSpeed = self.goSlow()
                self.leftMotor.setVelocity(leftSpeed)
                self.rightMotor.setVelocity(rightSpeed)
                # set observation .............
                observations = leftValue, rightValue
                # set reward .............
                # set done .........
                r = self.set_done()
                return observations, reward + r, self.done, False
            else:
                # Collision: large penalty and truncate the episode.
                observations = self.reset()
                reward = -100
                return observations, reward, False, True
        return None, None, self.done, False

    def set_done(self):
        """Mark the episode done (and return the 1000 bonus) when the robot is
        within 8 cm of the goal on both ground axes; otherwise return 0."""
        gpsValue = self.gps.getValues()
        self.current_position = gpsValue
        if abs(self.current_position[0] - self.final_position[0]) <= 0.08 and \
                abs(self.current_position[2] - self.final_position[2]) <= 0.08:
            self.done = True
            return 1000
        return 0

    def random_action(self):
        # NOTE(review): relies on self.action_space(), which is not defined
        # in the visible portion of this class -- presumably inherited or
        # defined elsewhere; confirm.
        return random.choice(self.action_space())

    def goStraight(self):
        # Both wheels at full speed.
        return self.max_speed, self.max_speed

    def goSlow(self):
        # Quarter speed on both wheels (currently unused -- see step()).
        return self.max_speed / 4, self.max_speed / 4

    def turnLeft(self, leftDistance, rightDistance):
        # Left wheel backwards, right wheel forwards, scaled by proximity.
        return -(leftDistance / 100), (rightDistance / 100) + 0.5

    def turnRight(self, leftDistance, rightDistance):
        # Mirror of turnLeft.
        return (leftDistance / 100) + 0.5, -(rightDistance / 100)

    def reset(self):
        """Clear the done flag and return the initial sensor observation.

        NOTE(review): does not reposition the robot -- the supervisor
        presumably handles teleporting it back; confirm.
        """
        self.done = False
        return self.init_leftValue, self.init_rightValue

    def send_to_super(self, message, data):
        # Pickle a (message, data) pair and emit it to the supervisor.
        data = message, data
        dataSerialized = pickle.dumps(data)
        self.emitter.send(dataSerialized)

    def receive_handle(self):
        """Process one pending supervisor packet, if any.

        Understands 'step' (run one env step and reply 'step_done') and
        'reset' (reply 'reset_done'). Always returns -1.
        """
        if self.receiver.getQueueLength() > 0:
            data = self.receiver.getData()
            message, action, step = pickle.loads(data)
            self.receiver.nextPacket()
            if message == 'step':
                obs, r, d, i = self.step(action)
                data = obs, r, d, i, step
                self.send_to_super('step_done', data)
            if message == 'reset':
                obs = self.reset()
                self.send_to_super('reset_done', obs)
        return -1

    def start(self):
        # Main loop: poll for supervisor commands every simulation step.
        while self.robot.step(self.timestep) != -1:
            self.receive_handle()
# NOTE(review): fragment -- this chunk begins inside a tuple of joint names
# whose opening parenthesis is outside the visible source.
           'LegLowerR', 'LegLowerL', 'AnkleR', 'AnkleL', 'FootR', 'FootL',
           'Neck', 'Head')
# List of position sensor devices.
positionSensors = []
# Create the Robot instance.
robot = Robot()
# NOTE(review): return value discarded -- getSupervisor() is a query, not a
# mode switch; confirm this call is intentional.
robot.getSupervisor()
basicTimeStep = int(robot.getBasicTimeStep())
# print(robot.getDevice("camera"))
camera1=robot.getCamera("Camera")
print(camera1)
# camera= Camera(camera1)
# Camera wrapper constructed directly by device name.
camera= Camera('Camera')
# print(robot.getCamera('Camera'))
# camera.wb_camera_enable()
mTimeStep=basicTimeStep
camera.enable(int(mTimeStep))
camera.getSamplingPeriod()
# width=camera.getWidth()
# height=camera.getHeight()
firstimage=camera.getImage()
ori_width = int(4 * 160)  # original image is 640x480
ori_height = int(3 * 160)
r_width = int(4 * 20)  # shrink to 80x60 during processing to speed it up; modify with care!
r_height = int(3 * 20)
# HSV (lower, upper) colour ranges for blob/landmark detection.
color_range = {'yellow_door': [(10, 43, 46), (34, 255, 255)],
               'red_floor1': [(0, 43, 46), (10, 255, 255)],
               'red_floor2': [(156, 43, 46), (180, 255, 255)],
# NOTE(review): dict literal truncated here in the visible source.
"""data_collector controller.""" import random import time import csv import numpy as np from controller import Robot, Camera, PositionSensor # create the Robot instance. robot = Robot() # get the time step of the current world. timestep = int(robot.getBasicTimeStep()) # get and enable Camera cameraTop = Camera("CameraTop") cameraTop.enable(timestep) print("Width: ", cameraTop.getWidth()) print("Height: ", cameraTop.getHeight()) #cameraTop.saveImage("cameraImg.png", 50) cameraBottom = Camera("CameraBottom") cameraBottom.enable(timestep) #Move head in right position #HeadYaw = robot.getMotor("HeadYaw") #HeadPitch = robot.getMotor("HeadPitch") #HeadYaw.setPosition(0.5) #HeadYaw.setVelocity(1.0) #HeadPitch.setPosition(0.2)
# NOTE(review): fragment -- this chunk begins inside a motor-list literal
# whose opening bracket is outside the visible source.
    robot.getMotor('leg5_servo1'),
    robot.getMotor('leg5_servo2'),
]
# ds = robot.getDistanceSensor('dsname')
# ds.enable(timestep)

def move_forward():
    # Tilt leg 0's second servo (index 1 of its 3-servo group) forward.
    # NOTE(review): only one leg is driven -- presumably a gait stub; confirm.
    motor_lst[1 + 0*3].setPosition(math.pi * -1 / 8)
    motor_lst[1 + 0*3].setVelocity(1.0)

def rotate(angle):
    # Set the first servo of every leg (index 0 of each 3-servo group).
    for i in range(6):
        motor_lst[0 + i*3].setPosition(angle)
        motor_lst[0 + i*3].setVelocity(1.0)

camera = Camera("camera_d435i")
camera.enable(15)  # 15 ms sampling period
print(camera.getSamplingPeriod())
# NOTE(review): "~" is not expanded by Webots' saveImage -- this path
# likely fails to save; confirm intended location.
camera.saveImage("~/test.png", 100)
gyro = Gyro("gyro")
gyro.enable(60)
inertial_unit = InertialUnit("inertial_unit")
inertial_unit.enable(60)
# Main loop:
# - perform simulation steps until Webots is stopping the controller

def default_low_pos():
    # Zero the first servo of every leg (neutral stance).
    for i in range(6):
        motor_lst[0 + i*3].setPosition(0)
class Vehicle:
    """Line-following / arm-equipped vehicle controller.

    NOTE(review): all of the attributes below are CLASS attributes shared
    by every instance; several @staticmethods access them through
    ``Vehicle.`` directly, so this singleton-style design is deliberate.
    """
    stage = 0                # current task/stage index
    MAX_SPEED = 2
    robot = Robot()          # single Webots robot instance, created at import
    motors = []              # [left, right, tower] drive/tower motors
    armMotors = []           # 8 arm joint motors
    armSensors = []          # matching arm position sensors
    armPs = []
    camera = Camera('camera')
    compass = Compass('compass')
    ds = robot.getDistanceSensor('distance sensor')
    ds2 = robot.getDistanceSensor('distance sensor2')
    HEIGHT = camera.getHeight()
    WIDTH = camera.getWidth()
    foodStage = 0            # sub-stage of the pick-food sequence
    p0 = WIDTH / 2
    leftSpeed = 0.0
    rightSpeed = 0.0
    leftSum = 0
    rightSum = 0
    HALF = int(WIDTH / 2)
    frame = np.zeros((HEIGHT, WIDTH))
    blur_im = np.zeros((HEIGHT, WIDTH))
    position = WIDTH / 2

    def __init__(self):
        #get motors, camera and initialise them.
        motorNames = ['left motor', 'right motor', 'tower rotational motor']
        #, 'trigger motor']
        for i in range(3):
            self.motors.append(self.robot.getMotor(motorNames[i]))
            self.motors[i].setPosition(float('inf'))
            if i <= 1:
                # Drive wheels: velocity control, starting stopped.
                self.motors[i].setVelocity(0)
            else:
                # Tower motor: position-controlled back to 0 at 0.8 rad/s.
                # NOTE(review): indentation reconstructed from flattened
                # source -- setPosition(0) assumed to be in this branch.
                self.motors[i].setVelocity(0.8)
                self.motors[i].setPosition(0)
        armMotorNames = [
            "arm_motor_1", "arm_motor_2", "arm_motor_3", "arm_motor_4",
            "arm_motor_5", "arm_motor_6", "arm_motor_7", "arm_motor_8"
        ]
        for i in range(len(armMotorNames)):
            self.armMotors.append(self.robot.getMotor(armMotorNames[i]))
            self.armMotors[i].setPosition(0)
            self.armMotors[i].setVelocity(0.3)
        armSensorNames = [
            "arm_position_sensor_1", "arm_position_sensor_2",
            "arm_position_sensor_3", "arm_position_sensor_4",
            "arm_position_sensor_5", "arm_position_sensor_6",
            "arm_position_sensor_7", "arm_position_sensor_8"
        ]
        for i in range(len(armSensorNames)):
            self.armSensors.append(
                self.robot.getPositionSensor(armSensorNames[i]))
            self.armSensors[i].enable(int(self.robot.getBasicTimeStep()))
        # Enable camera, compass and both distance sensors at the basic step.
        self.camera.enable(int(self.robot.getBasicTimeStep()))
        self.compass.enable(int(self.robot.getBasicTimeStep()))
        self.ds.enable(int(self.robot.getBasicTimeStep()))
        self.ds2.enable(int(self.robot.getBasicTimeStep()))

    def getStage(self):
        return self.stage

    def setStage(self, stage_num):
        self.stage = stage_num

    def getCompass(self):
        # Heading angle in (-pi, pi] from the compass x and z components.
        return np.angle(
            complex(self.compass.getValues()[0], self.compass.getValues()[2]))
def getDistanceValue(self, input_ds=1):
    """Return the reading of distance sensor 1 (default) or 2.

    Implicitly returns None for any other input_ds value.
    """
    if input_ds == 1:
        return self.ds.getValue()
    if input_ds == 2:
        return self.ds2.getValue()

def towerSeeLeft(self):
    # Rotate the camera tower 90 degrees left.
    self.motors[2].setPosition(np.pi / 2)

@staticmethod
def towerSeeRight():
    # Rotate the camera tower 90 degrees right (uses class-level motors).
    Vehicle.motors[2].setPosition(-np.pi / 2)

def towerRestore(self):
    # Point the camera tower straight ahead.
    self.motors[2].setPosition(0)

def pickFood(self):
    """Two-stage grab sequence; True once the box is grabbed and raised.

    Stage 0 extends the arm and closes the gripper; stage 1 lifts and
    retracts. Each sub-step is gated by a position-sensor threshold so the
    sequence advances across successive control steps.
    """
    if self.foodStage == 0:
        self.armMotors[0].setPosition(0.039)  # upper-arm extend
        self.armMotors[1].setPosition(0.93)  # upper-arm rotate
        if (self.armSensors[1].getValue() > 0.92):
            self.armMotors[2].setPosition(0.038)  # mid-arm extend
            #print(sensor3.getValue())
            if (self.armSensors[2].getValue() > 0.0379):
                self.armMotors[4].setPosition(0.005)  # grab the box
                self.armMotors[5].setPosition(0.005)
                if ((self.armSensors[4].getValue() >= 0.00269)
                        or (self.armSensors[5].getValue() >= 0.0031)):
                    self.foodStage = 1
    elif self.foodStage == 1:
        self.armMotors[2].setPosition(0)  # raise the box
        self.armMotors[2].setVelocity(.1)
        self.armMotors[1].setPosition(0)
        self.armMotors[6].setPosition(0.9)
        if (self.armSensors[1].getValue() < 0.002):
            self.armMotors[0].setPosition(0)
            self.armMotors[0].setVelocity(0.01)
            # grabbing task finished
            if (self.armSensors[0].getValue() < 0.003):
                return True
    return False

def releaseFood(self):
    """Open the gripper and retract the arm; True once fully released.

    NOTE(review): the final release check mixes `<=` and `>=` between the
    two gripper sensors -- looks asymmetric; preserved as-is. Falls
    through (returns None) while the sequence is still in progress.
    """
    if (self.armSensors[6].getValue() > 0.899):
        self.armMotors[3].setPosition(0.06)
        self.armMotors[7].setPosition(0.06)
        if ((self.armSensors[3].getValue() > 0.059)
                and (self.armSensors[7].getValue() > 0.059)):
            self.armMotors[4].setPosition(0)
            self.armMotors[5].setPosition(0)
            if ((self.armSensors[4].getValue() <= 0.00269)
                    or (self.armSensors[5].getValue() >= 0.0031)):
                self.armMotors[3].setPosition(0)
                self.armMotors[7].setPosition(0)
                self.armMotors[6].setPosition(0)
                return True

#Img Processing related
@staticmethod
def get_frame():
    """Grab the camera image as a HEIGHT x WIDTH grayscale numpy array."""
    HEIGHT = Vehicle.HEIGHT
    WIDTH = Vehicle.WIDTH
    camera = Vehicle.camera
    cameraData = camera.getImage()
    # get image and process it
    frame = np.zeros((HEIGHT, WIDTH))
    for x in range(0, Vehicle.WIDTH):
        for y in range(0, HEIGHT):
            gray = int(camera.imageGetGray(cameraData, WIDTH, x, y))
            frame[y][x] = gray
    return frame

@staticmethod
def edge_detect(frame, threshold=254, maxVal=255, kernel_1=20, kernel_2=15):
    """Sobel edges -> binary threshold -> smoothing kernel -> box blur.

    Per-task parameter presets are documented in the comments below.
    NOTE(review): np.float is removed in NumPy >= 1.24; plain float is the
    equivalent -- left unchanged in this documentation-only pass.
    """
    # threshold
    # Edge_LineTracking_T1:254; Edge_Bridge_detection_T3:180; Edge_Arch_detection_T4:100;Edge_Color_detect_T5:254
    # maxVal
    # Edge_LineTracking_T1:255; Edge_Bridge_detection_T3:255; Edge_Arch_detection_T4:255;Edge_Color_detect_T5:255
    # kernel_1
    # Edge_LineTracking_T1:20; Edge_Bridge_detection_T3:20; Edge_Arch_detection_T4:20;Edge_Color_detect_T5:20
    # kernel_2
    # Edge_LineTracking_T1:15; Edge_Bridge_detection_T3:15; Edge_Arch_detection_T4:15;Edge_Color_detect_T5:15
    # edge_detect Sobel
    x = cv2.Sobel(frame, cv2.CV_16S, 1, 0)
    y = cv2.Sobel(frame, cv2.CV_16S, 0, 1)
    absx = cv2.convertScaleAbs(x)
    absy = cv2.convertScaleAbs(y)
    dst = cv2.addWeighted(absx, 0.5, absy, 0.5, 0)
    #cv2.imwrite("dst.jpg", dst)
    # to binary
    ret, binary = cv2.threshold(dst, threshold, maxVal, cv2.THRESH_BINARY)
    #cv2.imwrite("binary.jpg", binary)
    # Smooth
    kernel1 = np.ones((kernel_1, kernel_1), np.float) / 25
    smooth = cv2.filter2D(binary, -1, kernel1)
    # blur
    blur_im = cv2.boxFilter(smooth, -1, (kernel_2, kernel_2), normalize=1)
    #cv2.imwrite("blur.jpg", blur_im)
    return blur_im

@staticmethod
def positioning(blur_im, type_select=0, height_per_1=0.7, height_per_2=0.8):
    """Average horizontal position of lit pixels in a horizontal band.

    type_select 0 averages across the full width; type_select 1 averages
    the left and right halves separately and returns their midpoint.
    height_per_1/height_per_2 bound the scanned band as fractions of HEIGHT.
    """
    # height_per_1
    # Edge_LineTracking_T1:0.7; Edge_Bridge_detection_T3:0.3; Edge_Arch_detection_T4:0.3;Edge_Color_detect_T5:0.6
    # height_per_2
    # Edge_LineTracking_T1:0.8; Edge_Bridge_detection_T3:0.6; Edge_Arch_detection_T4:0.8;Edge_Color_detect_T5:0.7
    HEIGHT = Vehicle.HEIGHT
    WIDTH = Vehicle.WIDTH
    HALF = Vehicle.HALF
    axis = 0
    num = 0
    position = 0
    axis_l = 0
    axis_r = 0
    num_l = 0
    num_r = 0
    position_l = 0
    position_r = WIDTH - 1
    if type_select == 0:
        for axis_v in range(int(HEIGHT * height_per_1),
                            int(HEIGHT * height_per_2)):
            for axis_h in range(WIDTH * 0, WIDTH):
                if blur_im[axis_v][axis_h] != 0:
                    axis = axis + axis_h
                    num = num + 1
        if num:
            position = axis / num + 1
    elif type_select == 1:
        for axis_v in range(int(HEIGHT * height_per_1),
                            int(HEIGHT * height_per_2)):
            for axis_h in range(WIDTH * 0, HALF):
                if blur_im[axis_v][axis_h] != 0:
                    axis_l = axis_l + axis_h
                    num_l = num_l + 1
                if blur_im[axis_v][axis_h + HALF] != 0:
                    axis_r = axis_r + axis_h + HALF
                    num_r = num_r + 1
        if num_l:
            position_l = axis_l / num_l + 1
        if num_r:
            position_r = axis_r / num_r + 1
        position = (position_l + position_r) / 2
    else:
        print('INPUT ERROR!:positioning:type_select')
    return position

@staticmethod
def steering(position, type_select=0, rectify_pixel=0, base_speed=3.0,
             straight_speed=2.0):
    """Map a line position to (leftSpeed, rightSpeed).

    Drives straight at straight_speed while the position is within
    rectify_pixel of the image centre, otherwise steers proportionally.
    type_select 1 uses a centre-offset (signed) speed law.
    NOTE(review): an invalid type_select leaves leftSpeed/rightSpeed
    unbound and would raise at the return -- preserved as-is.
    """
    # rectify_pixel
    # Edge_LineTracking_T1:7; Edge_Bridge_detection_T3:5; Edge_Arch_detection_T4:5;Edge_Color_detect_T5:3
    # base_speed
    # Edge_LineTracking_T1:3; Edge_Bridge_detection_T3:5; Edge_Arch_detection_T4:5;Edge_Color_detect_T5:20
    # straight_speed
    # Edge_LineTracking_T1:2; Edge_Bridge_detection_T3:3; Edge_Arch_detection_T4:2;Edge_Color_detect_T5:2
    WIDTH = Vehicle.WIDTH
    if type_select == 0:
        if abs(position - WIDTH / 2) > rectify_pixel:
            leftSpeed = (position / WIDTH) * base_speed
            rightSpeed = (1 - position / WIDTH) * base_speed
        else:
            leftSpeed = straight_speed
            rightSpeed = straight_speed
    elif type_select == 1:
        if abs(position - WIDTH / 2) > rectify_pixel:
            leftSpeed = (position / WIDTH - 0.5) * base_speed
            rightSpeed = (0.5 - position / WIDTH) * base_speed
        else:
            leftSpeed = straight_speed
            rightSpeed = straight_speed
    else:
        print('INPUT ERROR!:steering:type_select')
    return leftSpeed, rightSpeed

#Speed setting functions
def setSpeed(self, left, right):
    #set speed for four tracks
    self.motors[0].setVelocity(left)
    self.motors[1].setVelocity(right)

def turnRound(self, diff):
    #set speed for turning left or right
    # Returns True while still turning, False once |diff| is negligible.
    if abs(diff) < 0.001:
        self.setSpeed(0, 0)
        return False
    else:
        self.setSpeed(-0.3 * diff, 0.3 * diff)
        return True

def linePatrol(self):
    #get camera image, process it and set speed based on line patrolling algorithm
    #return False if there is no line
    image = self.camera.getImage()
    leftSum = 0
    rightSum = 0
    leftSpeed = 0
    rightSpeed = 0
    cameraData = self.camera.getImage()
    HEIGHT = self.camera.getHeight()
    WIDTH = self.camera.getWidth()
    frame = np.zeros((HEIGHT, WIDTH))
    # NOTE(review): loops iterate x over HEIGHT and y over WIDTH -- the
    # transpose of get_frame(); only safe if the camera is square. Confirm.
    for x in range(0, HEIGHT):
        for y in range(0, WIDTH):
            frame[y][x] = int(
                self.camera.imageGetGray(cameraData, WIDTH, x, y))
    # Sobel edge magnitude, binarised and blurred (same pipeline as
    # edge_detect but inlined with fixed parameters).
    absX = cv2.convertScaleAbs(cv2.Sobel(frame, cv2.CV_16S, 1, 0))
    absY = cv2.convertScaleAbs(cv2.Sobel(frame, cv2.CV_16S, 0, 1))
    # to binary
    ret, binary = cv2.threshold(cv2.addWeighted(absX, 0.5, absY, 0.5, 0),
                                254, 255, cv2.THRESH_BINARY)
    binaryImg = cv2.boxFilter(cv2.filter2D(
        binary, -1, np.ones((20, 20), np.float) / 25), -1, (15, 15),
        normalize=1)
    # Near-field band (bottom quarter): average line position.
    positionSum = 0
    positionCount = 0
    for i in range(int(HEIGHT * 0.75), HEIGHT):
        for j in range(WIDTH * 0, WIDTH):
            if binaryImg[i][j] != 0:
                positionSum += j
                positionCount += 1
    # Far-field band: used only to modulate acceleration.
    farpositionSum = 0
    farpositionCount = 0
    for i in range(int(HEIGHT * 0.32), int(HEIGHT * 0.44)):
        for j in range(WIDTH * 0, WIDTH):
            if binaryImg[i][j] != 0:
                farpositionSum += j
                farpositionCount += 1
    if farpositionCount == 0:
        farcenter = 0
    else:
        farcenter = farpositionSum / farpositionCount
    # Gentle acceleration when the far line is dead-centre, snappy otherwise.
    if abs(farcenter - WIDTH / 2) < 0.1:
        self.motors[0].setAcceleration(0.3)
        self.motors[1].setAcceleration(0.3)
        #("straight")
    else:
        self.motors[0].setAcceleration(20)
        self.motors[1].setAcceleration(20)
        #print("not straight")
    if positionCount or farpositionCount:
        if positionCount == 0:
            center = 80
        else:
            center = positionSum / positionCount
        # Signed offset from centre in [-2, 2]; differential steering law.
        diff = 4 * (0.5 - center / WIDTH)
        if diff > 0.01:
            leftSpeed = (1 - 0.6 * diff) * self.MAX_SPEED
            rightSpeed = (1 - 0.3 * diff) * self.MAX_SPEED
        elif diff < -0.01:
            leftSpeed = (1 + 0.3 * diff) * self.MAX_SPEED
            rightSpeed = (1 + 0.6 * diff) * self.MAX_SPEED
        else:
            leftSpeed = self.MAX_SPEED
            rightSpeed = self.MAX_SPEED
        self.setSpeed(leftSpeed, rightSpeed)
        return True
    else:
        return False

@staticmethod
def linePatrol2():
    # Task 1
    """Pipeline variant of line patrol: returns wheel speeds instead of
    driving the motors directly."""
    # get_frame
    frame = Vehicle.get_frame()
    # edge_detect
    blur_im = Vehicle.edge_detect(frame, 100, 255, 20, 15)
    # positioning
    position = Vehicle.positioning(blur_im, 0, 0.7, 0.8)
    # steering
    leftSpeed, rightSpeed = Vehicle.steering(position, 0, 4, 0.8, 0.8)
    return leftSpeed, rightSpeed

@staticmethod
def box_found():
    # Task 2
    """True when the box is centred in two stacked image bands (returns
    None otherwise)."""
    HALF = Vehicle.HALF
    frame = Vehicle.get_frame()
    x = cv2.Sobel(frame, cv2.CV_16S, 1, 0)
    y = cv2.Sobel(frame, cv2.CV_16S, 0, 1)
    absx = cv2.convertScaleAbs(x)
    absy = cv2.convertScaleAbs(y)
    dst = cv2.addWeighted(absx, 0.5, absy, 0.5, 0)
    cv2.imwrite("dst.jpg", dst)
    # to binary
    ret, filtered = cv2.threshold(dst, 50, 255, cv2.THRESH_TOZERO)
    cv2.imwrite("binary.jpg", filtered)
    position_i = Vehicle.positioning(filtered, 0, 0.32, 0.35)
    if abs(position_i - HALF) < 10:
        position = Vehicle.positioning(filtered, 0, 0.25, 0.3)
        if abs(position - HALF) < 10:
            # this is a predict letting it stop exactly at the box center
            return True
    else:
        pass

@staticmethod
def bridge_found():
    # Task 3
    """Detect the bridge: checks the right-half edge centroid in a low
    band, then the leftmost edge pixel against the image centre."""
    HALF = Vehicle.HALF
    WIDTH = Vehicle.WIDTH
    HEIGHT = Vehicle.HEIGHT
    count = 0
    num1 = 0
    num = 0
    axis = WIDTH
    position_i = 0
    frame = Vehicle.get_frame()
    x = cv2.Sobel(frame, cv2.CV_16S, 1, 0)
    y = cv2.Sobel(frame, cv2.CV_16S, 0, 1)
    absx = cv2.convertScaleAbs(x)
    absy = cv2.convertScaleAbs(y)
    dst = cv2.addWeighted(absx, 0.5, absy, 0.5, 0)
    # cv2.imwrite("dst.jpg", dst)
    # to binary
    ret, filtered = cv2.threshold(dst, 50, 255, cv2.THRESH_TOZERO)
    # cv2.imwrite("filtered.jpg", filtered)
    # Centroid of edge pixels in the right half of a low band.
    for axis_v in range(int(HEIGHT * 0.8), int(HEIGHT * 0.87)):
        for axis_h in range(HALF, WIDTH):
            if filtered[axis_v][axis_h] != 0:
                axis = axis + axis_h
                num1 = num1 + 1
    if num1:
        position_i = axis / num1 + 1
    if abs(position_i - 1.5 * HALF) < 10:
        # print('pi = ', position_i)
        # Leftmost edge pixel in a slightly narrower band.
        for axis_v in range(int(HEIGHT * 0.82), int(HEIGHT * 0.85)):
            for axis_h in range(WIDTH * 0, int(WIDTH * 1)):
                if filtered[axis_v][axis_h] != 0:
                    axis_i = axis_h
                    axis = min(axis_i, axis)
        # print('axis = ', axis)
        if abs(axis - 0.5 * WIDTH) < 5:
            return True
        else:
            return False

def arch_found(self, a, b):
    # Task 4
    """Detect the arch by counting mid-gray (110..130) pixels; stops the
    wheels and returns 1 when the arch crosses the image centre.

    Uses module-level `count_arch` to remember which side was seen first.
    a/b are the band bounds passed through to positioning().
    """
    global count_arch
    HEIGHT = Vehicle.HEIGHT
    WIDTH = Vehicle.WIDTH
    camera = Vehicle.camera
    cameraData = camera.getImage()
    frame = np.zeros((HEIGHT, WIDTH))
    Q = 0
    position = 0
    # Mask of pixels whose gray level falls in the arch's band.
    for x in range(0, WIDTH):
        for y in range(0, HEIGHT):
            gray = int(camera.imageGetGray(cameraData, WIDTH, x, y))
            if 110 < gray < 130:
                frame[y][x] = 255
                Q = Q + 1
            else:
                pass
    #print(i)
    # cv2.imwrite('frame.jpg', frame)
    #blur_im = self.edge_detect(frame, 254, 255)
    #cv2.imwrite('blur.jpg', blur_im)
    if Q > 50:
        position = self.positioning(frame, 0, a, b)
        #print(i, position)
        if count_arch == 0:
            if int(position) < WIDTH / 2:
                count_arch = 1
                for i in range(2):
                    self.motors[i].setVelocity(0)
                return 1
        else:
            if int(position) > WIDTH / 2:
                for i in range(2):
                    self.motors[i].setVelocity(0)
                return 1
            else:
                return 0
    else:
        return 0

def count(self):
    """Count pixels with gray level strictly between 110 and 130."""
    HEIGHT = Vehicle.HEIGHT
    WIDTH = Vehicle.WIDTH
    camera = Vehicle.camera
    cameraData = camera.getImage()
    i = 0
    for x in range(0, WIDTH):
        for y in range(0, HEIGHT):
            gray = int(camera.imageGetGray(cameraData, WIDTH, x, y))
            if 110 < gray < 130:
                i = i + 1
            else:
                pass
    #print(i)
    return i

@staticmethod
def colourPatrol():
    # Task 5
    """Colour-line patrol driven by module-level `flag` state.

    flag -1: facing ~pi heading, sample a 3x3 patch to classify the line
    colour (0=red, 1=yellow, 2=purple); flag 0..2: follow that colour,
    switching to flag 3 when a near-ground patch reads 90..100 gray.
    Returns (leftSpeed, rightSpeed).

    NOTE(review): `Vehicle.getCompass(Vehicle)` passes the class itself
    as `self`, relying on compass being a class attribute -- works, but
    fragile; confirm intent.
    """
    HEIGHT = Vehicle.HEIGHT
    WIDTH = Vehicle.WIDTH
    camera = Vehicle.camera
    cameraData = camera.getImage()
    frame = np.zeros((HEIGHT, WIDTH))
    color_kernel = np.zeros((3, 3))
    compass = Vehicle.getCompass(Vehicle)
    position = HEIGHT / 2
    leftSpeed = 0
    rightSpeed = 0
    global flag
    if (flag == -1) and (3.12 < abs(compass) < 3.15):
        # Sample a 3x3 patch at mid-width, 80% height to classify colour.
        for x in range(0, 3):
            for y in range(0, 3):
                gray = int(
                    camera.imageGetGray(cameraData, WIDTH,
                                        x + int(0.5 * WIDTH),
                                        y + int(0.80 * HEIGHT)))
                color_kernel[y][x] = gray
        gray_average = np.mean(color_kernel)
        # color classification
        if 100 < gray_average < 130:
            flag = 0  # red
        elif 175 < gray_average < 200:
            flag = 1  # yellow
        elif 140 < gray_average < 170:
            flag = 2  # purple
        else:
            pass
        # print('color flag=', flag)
        leftSpeed = 2.0
        rightSpeed = 2.0
    elif flag == 0 or flag == 1 or flag == 2:
        # End-of-line check: patch near the bottom of the frame.
        for x in range(0, 3):
            for y in range(0, 3):
                gray = int(
                    camera.imageGetGray(cameraData, WIDTH,
                                        x + int(0.5 * WIDTH),
                                        y + int(0.95 * HEIGHT)))
                color_kernel[y][x] = gray
        gray_average = np.mean(color_kernel)
        if 90 < gray_average < 100:
            flag = 3  # finished
        # Build a mask of pixels matching the tracked colour's gray band.
        for y in range(0, HEIGHT):
            for x in range(0, WIDTH):
                gray = int(camera.imageGetGray(cameraData, WIDTH, x, y))
                # color classification
                if flag == 0:
                    if 100 < gray < 130:
                        frame[y][x] = 255
                    else:
                        pass
                elif flag == 1:
                    if 175 < gray < 200:
                        frame[y][x] = 255
                    else:
                        pass
                elif flag == 2:
                    if 140 < gray < 170:
                        frame[y][x] = 255
                    else:
                        pass
        # edge_detect Sobel
        x = cv2.Sobel(frame, cv2.CV_16S, 1, 0)
        y = cv2.Sobel(frame, cv2.CV_16S, 0, 1)
        absX = cv2.convertScaleAbs(x)
        absY = cv2.convertScaleAbs(y)
        dst = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
        # to binary
        ret, binary = cv2.threshold(dst, 254, 255, cv2.THRESH_BINARY)
        # Smooth
        kernel1 = np.ones((20, 20), np.float) / 25
        smooth = cv2.filter2D(binary, -1, kernel1)
        # blur
        blur_im1 = cv2.boxFilter(smooth, -1, (15, 15), normalize=1)
        # cv2.imwrite('smooth_blur.jpg', blur_im1)
        # cv2.imwrite('gray.jpg', frame)
        # cv2.imwrite('frame.jpg', frame)
        # video.write(blur_im1)
        # Locate
        axis = 0
        num = 0
        for axis_v in range(int(HEIGHT * 0.8), int(HEIGHT * 0.9)):
            for axis_h in range(WIDTH * 0, WIDTH):
                if blur_im1[axis_v][axis_h] != 0:
                    axis = axis + axis_h
                    num = num + 1
        if num:
            position = axis / num + 1
        # Steering
        if 5 <= abs(position - WIDTH / 2):
            leftSpeed = (position / WIDTH) * 1.5
            rightSpeed = (1 - position / WIDTH) * 1.5
        elif 2.5 < abs(position - WIDTH / 2) < 5:
            leftSpeed = (position / WIDTH) * 0.9
            rightSpeed = (1 - position / WIDTH) * 0.9
        # if lines are losted in the view of camera, try adjust parameters here
        else:
            leftSpeed = 1
            rightSpeed = 1
    else:
        pass
    return leftSpeed, rightSpeed
def clamp(x, low, hi):
    """Clamp x into the inclusive range [low, hi]."""
    return min(hi, max(low, x))

# --- Drone controller start-up (script) ---
init_supervisor()
# create the Robot instance.
robot = supervisor
print("Goal Location:", goal_location)
# get the time step of the current world.
timestep = int(robot.getBasicTimeStep())
# initialize camera
camera = Camera('camera')
# NOTE(review): enabled at a 1 ms period rather than `timestep` --
# unusually fast; confirm this is intentional.
camera.enable(1)
# position camera
camera_pitch_motor = robot.getMotor('camera pitch')
camera_pitch_motor.setPosition(math.pi / 2)
# Start propellers
initializeMotors(robot)
# All four propellers spun up to the same take-off speed.
prop_takeoff_speed = 68.5
motorsSpeed(robot, prop_takeoff_speed, prop_takeoff_speed,
            prop_takeoff_speed, prop_takeoff_speed)
# Initialize Gyro
myGyro = robot.getGyro('gyro')
myGyro.enable(timestep)
# Initialize InertialUnit
inertial_unit = InertialUnit('inertial unit')
inertial_unit.enable(timestep)
class SupervisorController:
    """Webots Supervisor-side DQN trainer.

    Drives a robot node (DEF MY_BOT) through emitter/receiver messages,
    learns a CNN Q-network from 64x64 camera images, and optionally mixes in
    interactive feedback from a pretrained "agent" network plus probabilistic
    policy reuse (PPR).  Relies on module-level names defined elsewhere in
    this file: Supervisor, Camera, Receiver, Emitter, PPR, resultsFolder,
    and the keras / sklearn / numpy / pandas imports.
    """

    def __init__(self, timesteps=32, gamma=0.99, epsilon=1.0,
                 epsilon_min=0.01, epsilon_log_decay=0.99, alpha=0.01):
        # Supervisor handle and the controlled robot node.
        self.supervisor = Supervisor()
        self.robot_node = self.supervisor.getFromDef("MY_BOT")
        if self.robot_node is None:
            # NOTE(review): message says MY_ROBOT but the DEF looked up is MY_BOT
            sys.stderr.write(
                "No DEF MY_ROBOT node found in the current world file\n")
            sys.exit(1)
        # Fields used to teleport the robot back to its start pose.
        self.trans_field = self.robot_node.getField("translation")
        self.rot_field = self.robot_node.getField("rotation")
        self.timestep = timesteps
        self.camera = Camera('camera')
        self.camera.enable(self.timestep)
        self.init_image = self.get_image()
        self.timestep = timesteps  # NOTE(review): duplicate assignment
        self.receiver = Receiver('receiver')
        self.receiver.enable(self.timestep)
        self.emitter = Emitter('emitter')
        # Replay buffer of (state, action, reward, next_state, done, obs).
        self.memory = deque(maxlen=50000)
        self.batch_size = 128
        self.alpha = alpha  # learning rate for Nadam
        self.gamma = gamma  # discount factor
        self.epsion_init = epsilon  # NOTE(review): typo kept ("epsilon_init")
        self.epsilon_min = epsilon_min
        self.epsilon_decay = epsilon_log_decay
        # Previous-step bookkeeping used when the next observation arrives.
        self.pre_state = self.init_image
        self.pre_action = -1  # -1 means "no action taken yet"
        self.pre_go_straight = False
        self.reward = 0
        self.step = 0
        self.max_step = 200
        self.file = None  # result-file prefix, set in start()
        # interactive
        self.feedbackProbability = 0  # chance of asking the trainer for advice
        self.feedbackAccuracy = 1    # chance the advice given is correct
        self.PPR = False             # probabilistic policy reuse on/off
        self.feedbackTotal = 0
        self.feedbackAmount = 0
        self.init_model()
        self.init_parametter()

    def init_model(self):
        """Build the Q-networks and the state-generalisation models."""
        self.main_network = self.target_network = None  # placeholders overwritten below
        self.main_network = self.build_network()
        self.target_network = self.build_network()
        self.agent_network = self.build_network()  # pretrained adviser policy
        self.generalise_model = self.init_gereral_model()
        self.pca_model = self.init_pca_model()

    def init_parametter(self):
        # Reset exploration/episode state (also called again from start()).
        self.epsilon = self.epsion_init
        self.episode = 0
        self.policy_reuse = PPR()

    def init_gereral_model(self):
        # KMeans groups raw grayscale images into a few coarse "state groups".
        n_clusters = 2
        return KMeans(n_clusters=n_clusters, n_init=10)

    def init_pca_model(self):
        n_component = 100  # NOTE(review): unused — 100 is hard-coded below
        return PCA(n_components=100, random_state=22)

    def get_image(self):
        """Return the current camera frame as a 64x64 PIL RGB image.

        Falls back to an all-black image when the camera has no frame yet.
        """
        image = self.camera.getImage()
        if image is None:
            empty_image = np.zeros((64, 64, 3))
            return Image.fromarray(empty_image.astype(np.uint8))
        else:
            return self.toPIL(image)

    def toPIL(self, bytes_data):
        # Webots camera buffers are 4-channel; decode then drop alpha.
        imgPIL = Image.frombytes('RGBA', (64, 64), bytes_data)
        imgPIL = imgPIL.convert('RGB')
        return imgPIL

    def image_process(self, PIL):
        """Normalise to [0, 1] and prepend a batch axis -> (1, H, W, 3)."""
        array = np.array(PIL)
        array = array / 255
        return np.reshape(array, list((1, ) + array.shape))

    def save_image(self, PIL, ep, step):
        # Dump a frame for offline inspection, keyed by episode and step.
        PIL.save(resultsFolder + 'images/' + str(ep) + '_' + str(step) +
                 '.png')

    def update_target(self):
        # Copy online weights into the frozen target network.
        self.target_network.set_weights(self.main_network.get_weights())

    def observation_space(self):
        # NOTE(review): returns the bound method itself — almost certainly a
        # bug; there is no self.observation_space attribute set anywhere here.
        return self.observation_space

    def get_epsilon(self, t):
        """Log-decayed exploration rate clamped to [epsilon_min, epsilon]."""
        return max(
            self.epsilon_min,
            min(self.epsilon,
                1.0 - math.log10((t + 1) * self.epsilon_decay)))

    def build_network(self):
        """CNN mapping a 64x64x3 image to one output per action (MSE loss)."""
        model = Sequential()
        model.add(Input(shape=(64, 64, 3)))
        model.add(Conv2D(4, kernel_size=8, activation='linear',
                         padding='same'))
        model.add(MaxPooling2D((2, 2), padding='same'))
        model.add(Conv2D(8, kernel_size=4, activation='linear',
                         padding='same'))
        model.add(MaxPooling2D((2, 2), padding='same'))
        model.add(
            Conv2D(16, kernel_size=2, activation='linear', padding='same'))
        model.add(MaxPooling2D((2, 2), padding='same'))
        model.add(Flatten())
        model.add(Dense(256, activation='linear'))
        # NOTE(review): softmax output with MSE loss is unusual for Q-values.
        model.add(Dense(len(self.action_space()), activation='softmax'))
        opt = Nadam(learning_rate=self.alpha)
        model.compile(loss='mse', optimizer=opt)
        return model

    def save_reward(self, file, rewards, totals, feedbacks):
        """Write per-episode reward/feedback columns to a CSV file."""
        pairs = {'Reward': rewards, 'Total': totals, 'Feedback': feedbacks}
        data_df = pd.DataFrame.from_dict(pairs)
        data_df.to_csv(file)

    def save_model(self, file):
        # Persist online-network weights plus the generalisation models.
        self.main_network.save_weights(file)
        self.save_generalise_model(file)

    def save_generalise_model(self, filename):
        """Pickle KMeans, PCA, and the raw obs column of the replay buffer."""
        obs = [s[5] for s in self.memory]  # 6th tuple slot is obs
        with open(filename + 'gel', "wb") as f:
            pickle.dump(self.generalise_model, f)
        with open(filename + 'pca', "wb") as f:
            pickle.dump(self.pca_model, f)
        with open(filename + 'state', "wb") as f_:
            pickle.dump(obs, f_)

    def load_generalise_model(self, filename):
        # Counterpart of save_generalise_model (the 'state' file is not read).
        with open(filename + 'gel', "rb") as f:
            print(filename + 'gel')
            self.generalise_model = pickle.load(f)
        with open(filename + 'pca', "rb") as f:
            self.pca_model = pickle.load(f)

    def load_model(self, file):
        """Load adviser weights + generalisation models from file prefix."""
        self.agent_network.load_weights(file + '.model')
        self.load_generalise_model(file + '.model')
        # NOTE(review): update_target() copies from main_network, which was
        # not loaded here — confirm this is intended.
        self.update_target()

    def finalise(self, rewards, totals, feedbacks, ppr):
        """Save results and model under a name encoding the run settings."""
        file = self.file + '_' + str(self.feedbackProbability) + '_' + str(
            self.feedbackAccuracy) + str(ppr)
        self.save_reward(file + '.csv', rewards, totals, feedbacks)
        self.save_model(file + '.model')

    def get_group(self, state):
        """Map a processed state (1, H, W, 3) to its KMeans group id."""
        # nx, ny, nz = state[0].shape
        # state = state.reshape(nx * ny * nz)
        # state = [state]
        # new_state = self.pca_model.transform(state)
        nx, ny, nz = state[0].shape
        # Collapse to grayscale and flatten before clustering.
        image_grayscale = state[0].mean(axis=2).astype(np.float32)
        image_grayscale = image_grayscale.reshape(nx * ny)
        image_grayscale = [image_grayscale]
        return self.generalise_model.predict(image_grayscale)[0]

    def memorize(self, state, action, reward, next_state, done, obs):
        # Append one transition to the replay buffer.
        self.memory.append((state, action, reward, next_state, done, obs))

    def updatePolicy(self, batchSize=0):
        """Train on batchSize samples (0 -> default; capped by buffer size)."""
        if batchSize == 0:
            batchSize = self.batch_size
        if len(self.memory) < batchSize:
            self.trainNetwork(len(self.memory))
            return  # do nothing
        self.trainNetwork(batchSize)
        return

    def trainNetwork(self, batch_size):
        """One fitted epoch of DQN targets on a sampled minibatch."""
        # sample a mini batch of transition from the replay buffer
        minibatch = random.sample(self.memory, batch_size)
        states = []
        targets = []
        for state, action, reward, next_state, done, obs in minibatch:
            state_processed = self.image_process(state)
            next_state_processed = self.image_process(next_state)
            if not done:
                # Bootstrapped target from the frozen target network.
                target = self.target_network.predict(next_state_processed)
                target_Q = (reward + self.gamma * np.max(target[0]))
            else:
                target_Q = reward
            # compute the Q value using the main network
            Q_values = self.main_network.predict(state_processed)
            Q_values[0][action] = target_Q
            states.append(state_processed[0])
            targets.append(Q_values[0])
        # train the main network
        states = np.array(states)
        targets = np.array(targets)
        self.main_network.fit(states, targets, epochs=1, verbose=0)

    def normal_action(self, state, epsilon=0.1):
        """Epsilon-greedy action, optionally overridden by PPR reuse."""
        # exploration
        if np.random.random() <= epsilon:
            action = self.random_action()
            # PPR:
            if self.PPR:
                group = self.get_group(state)
                redoAction, rate = self.policy_reuse.get(group)
                # print(group, rate)
                if (np.random.rand() < rate):
                    action = redoAction
            # end PPR:
        # exploitation
        else:
            action = np.argmax(self.main_network.predict(state))
        return action

    def action_space(self):
        """
        0: left
        1: right
        2: straight
        """
        return [0, 1, 2]

    def random_action(self):
        # if np.random.rand() < 0.5:
        #     return 2
        # else:
        #     return random.choice([0, 1])
        return random.choice(self.action_space())

    def propose_action(self, obs):
        # NOTE(review): stub — always returns None.
        return

    def has_obstacle(self, leftValue, rightValue):
        # Distance-sensor threshold; values above 500 count as an obstacle.
        return leftValue > 500 or rightValue > 500

    def back_to_begin(self):
        """Teleport the robot to its initial translation/rotation."""
        INITIAL = [0, 0, 0]
        self.trans_field.setSFVec3f(INITIAL)
        ROT_INITIAL = [0, 1, 0, 3.2]
        self.rot_field.setSFRotation(ROT_INITIAL)

    def reset(self):
        """Reset per-episode state and tell the robot to reset itself."""
        self.pre_state = self.init_image
        self.pre_action = -1
        self.pre_go_straight = False
        self.reward = 0
        self.step = 0
        self.finish = False
        self.feedbackTotal = 0
        self.feedbackAmount = 0
        self.back_to_begin()
        self.send_to_robot('reset', None)

    def propose_new_action(self, obs):
        """Choose the next action from (left, right) distance readings.

        Goes straight when no obstacle; otherwise mixes adviser feedback
        (with probability feedbackProbability, correct with probability
        feedbackAccuracy) with the normal epsilon-greedy policy.
        """
        left, right = obs
        obstacle_flag = self.has_obstacle(left, right)
        pre_state_processed = self.image_process(self.pre_state)
        if not obstacle_flag:
            action = 2  # straight
            self.pre_action = action
            self.pre_go_straight = True
        else:
            # propose new action ------------------
            if self.PPR:
                self.policy_reuse.step()
            if np.random.rand() < self.feedbackProbability:
                # get advice
                trueAction = np.argmax(
                    self.agent_network.predict(pre_state_processed))
                # PPR:
                if self.PPR:
                    group = self.get_group(pre_state_processed)
                    self.policy_reuse.add(group, trueAction)
                # end PPR:
                if np.random.rand() < self.feedbackAccuracy:
                    action = trueAction
                else:
                    # Deliberately wrong advice: any action but the true one.
                    while True:
                        action = self.random_action()
                        if action != trueAction:
                            break
                self.feedbackAmount += 1
            else:
                action = self.normal_action(pre_state_processed, self.epsilon)
            self.pre_go_straight = False
            self.feedbackTotal += 1
        self.pre_action = action
        return action

    def execute(self, obs, reward, done, info):
        """Consume one step result: accumulate reward, learn at episode end."""
        state = self.get_image()
        if self.pre_action != -1:
            self.reward += reward
        if self.step == self.max_step or done:
            if done:
                # Keep the final frames for offline inspection.
                self.save_image(state, self.episode, self.step)
                self.save_image(self.pre_state, self.episode, self.step - 1)
            self.memorize(self.pre_state, self.pre_action, reward, state,
                          done, obs)
            self.updatePolicy(self.step)
            self.update_target()
            if self.epsilon > self.epsilon_min:
                self.epsilon *= self.epsilon_decay
            self.episode += 1
            self.finish = True
            return
        if info:
            self.back_to_begin()
        # "Go straight" transitions are not stored in the replay buffer.
        if not self.pre_go_straight:
            self.memorize(self.pre_state, self.pre_action, reward, state,
                          done, obs)
        self.pre_state = state
        return

    def receive_handle(self):
        """Process one queued message from the robot controller, if any."""
        send_message, send_data = None, None
        if self.receiver.getQueueLength() > 0:
            data = self.receiver.getData()
            message, d = pickle.loads(data)
            if message == 'step_done':
                obs, r, d, i, s = d
                # print(s, self.step - 1, self.pre_action, r)
                # check synchronize
                self.execute(obs, r, d, i)
                if not self.finish:
                    action = self.propose_new_action(obs)
                    self.send_to_robot('step', action)
                    self.step += 1
            if message == 'reset_done':
                obs = d
                self.execute(obs, 0, False, False)
                action = self.propose_new_action(obs)
                self.send_to_robot('step', action)
            if message == 'obstacle':
                self.back_to_begin()
            self.receiver.nextPacket()
        return

    def send_to_robot(self, message, data):
        # Ship (message, data, step) to the robot via the emitter.
        data = message, data, self.step
        dataSerialized = pickle.dumps(data)
        self.emitter.send(dataSerialized)

    def start(self, max_step, episodes, file, feedbackP=0, feedbackA=1,
              PPR=False):
        """Run the training loop for `episodes` episodes and save results.

        file      -- prefix for the .csv / .model outputs
        feedbackP -- probability of receiving adviser feedback per decision
        feedbackA -- probability the feedback is correct
        PPR       -- enable probabilistic policy reuse
        """
        self.file = file
        self.max_step = max_step
        self.feedbackProbability = feedbackP
        self.feedbackAccuracy = feedbackA
        self.PPR = PPR
        rewards = []
        feedbackTotal = []
        feedbackAmount = []
        self.init_parametter()
        for i in range(episodes):
            self.reset()
            self.episode = i
            while self.supervisor.step(
                    self.timestep) != -1 and not self.finish:
                self.receive_handle()
            print(i, self.reward, self.feedbackTotal, self.feedbackAmount)
            rewards.append(self.reward)
            feedbackTotal.append(self.feedbackTotal)
            feedbackAmount.append(self.feedbackAmount)
        self.finalise(rewards, feedbackTotal, feedbackAmount, PPR)
# Top-view camera shown on a Display overlay (Webots vehicle controller).
from vehicle import Driver
from controller import Camera, Display, Keyboard
import cv2
import numpy as np
from numpy import array

car = Driver()
# cameraFront = Camera("cameraFront")
cameraTop = Camera("cameraTop")
display = Display("displayTop")
display.attachCamera(cameraTop)
keyboard = Keyboard()
# cameraFront.enable(32)
cameraTop.enable(32)
keyboard.enable(32)

while car.step() != -1:
    # Clear the display overlay each frame (fully transparent black).
    display.setColor(0x000000)
    display.setAlpha(0.0)
    display.fillRectangle(0, 0, display.getWidth(), display.getHeight())
    img = cameraTop.getImage()
    # 4-channel camera buffer -> (height, width, 4) array.
    image = np.frombuffer(img, np.uint8).reshape(
        (cameraTop.getHeight(), cameraTop.getWidth(), 4))
    # cv2.imwrite("img.png", image)
    gray = cv2.cvtColor(np.float32(image), cv2.COLOR_RGB2GRAY)
    # --- rotates the camera image by 90 degrees
    # gray270 = np.rot90(gray, 3)
from controller import Robot from controller import Keyboard from controller import Camera TIME_STEP = 1 robot = Robot() camera = Camera("camera") camera.enable(int(robot.getBasicTimeStep())) motors = [] motorNames = ['left motor', 'right motor', 'tower rotational motor'] for i in range(3): motors.append(robot.getMotor(motorNames[i])) motors[i].setPosition(float('inf')) motors[i].setVelocity(0.0) keyboard = robot.getKeyboard() keyboard.enable(TIME_STEP) leftSpeed = 0.0 rightSpeed = 0.0 SPEED = 0.0 while robot.step(TIME_STEP) != -1: key1 = keyboard.getKey() key2 = keyboard.getKey() #tower camera rotation if (key1 == 81 or key2 == 81): motors[2].setVelocity(0.8) elif (key1 == 69 or key2 == 69): motors[2].setVelocity(-0.8) else: motors[2].setVelocity(0) #Forward if (key1 == 87 or key2 == 87):
import cv2
import numpy as np

hizi = 6.28          # speed (rad/s)
maxMesafe = 1024     # maximum value the distance sensor reports
min_uzaklık = 1.0    # minimum obstacle distance

# create the Robot instance.
robot = Robot()  # NOTE(review): Robot/Camera are not imported in the visible imports
# get the time step of the current world.
timestep = int(robot.getBasicTimeStep())

camera = Camera("camera")
# fetch and enable the devices
Camera.enable(camera, timestep)

# fetch the wheel motors
solMotorİleri = robot.getMotor("front left wheel")
sağMotorİleri = robot.getMotor("front right wheel")
sağMotorGeri = robot.getMotor("back right wheel")
solMotorGeri = robot.getMotor("back left wheel")

# switch the motors to velocity control (infinite target position)
solMotorİleri.setPosition(float("inf"))
solMotorGeri.setPosition(float("inf"))
sağMotorİleri.setPosition(float("inf"))
sağMotorGeri.setPosition(float("inf"))

ps = []  # distance-sensor handles, filled later
'PelvL', 'LegUpperR', 'LegUpperL', 'LegLowerR', 'LegLowerL', 'AnkleR', 'AnkleL', 'FootR', 'FootL', 'Neck', 'Head') # List of position sensor devices. positionSensors = [] # Create the Robot instance. robot = Robot() robot.getSupervisor() basicTimeStep = int(robot.getBasicTimeStep()) # print(robot.getDevice("camera")) camera1 = robot.getCamera("Camera") print(camera1) # camera= Camera(camera1) camera = Camera('Camera') # print(robot.getCamera('Camera')) # camera.wb_camera_enable() mTimeStep = basicTimeStep print(camera.enable(int(mTimeStep))) print(camera.getSamplingPeriod()) print(camera.getWidth()) print(camera.getHeight()) image = camera.getImage() # print(image) if image == None: print("none") # print(image.size()) # cameradata = cv2.VideoCapture('Camera') camera.saveImage('/home/luyi/webots.png', 100) # print(len(cap))
from controller import Robot from controller import Camera TIME_STEP = 64 robot = Robot() #Define motors motors = [] motorNames = ['left motor', 'right motor'] for i in range(2): motors.append(robot.getMotor(motorNames[i])) motors[i].setPosition(float('inf')) motors[i].setVelocity(0.0) motors[i].setAcceleration(25) camera = Camera('camera') camera.enable(int(robot.getBasicTimeStep())) SPEED = 2 while robot.step(TIME_STEP) != -1: leftSpeed = 0.0 rightSpeed = 0.0 #get image and process it image = camera.getImage() leftSum = 0 rightSum = 0 cameraData = camera.getImage() for x in range(0, camera.getWidth()): for y in range(int(camera.getHeight() * 0.9), camera.getHeight()): gray = Camera.imageGetGray(cameraData, camera.getWidth(), x, y) if x < camera.getWidth() / 2: leftSum += gray else:
# Minimal setup: robot with a recognition-capable camera (IK imports unused here).
from controller import Robot, Motor, DistanceSensor
from ikpy.chain import Chain
import numpy as np
import time
from controller import Camera, Device, CameraRecognitionObject

robot = Robot()
camera = Camera("camera")
camera.enable(20)  # 20 ms sampling period
# firstObject = Camera.getRecognitionObjects()[0]
# id = firstObject.get_id()
# position = firstObject.get_position()
#sensörün mesafede nasıl algı m min_uzaklık = 1.0 # create the Robot instance. robot = Robot() # get the time step of the current world. timestep = int(robot.getBasicTimeStep()) #Camera ve RangeFinder getirir kinectColor = robot.getCamera("kinect color") kinectRange = robot.getRangeFinder("kinect range") #camera ve rangefinder başlatıldı Camera.enable(kinectColor, timestep) RangeFinder.enable(kinectRange, timestep) # motorların tagını getirir #motorları getirir solMotor = robot.getMotor("left wheel") sağMotor = robot.getMotor("right wheel") #motorları hareket etirir solMotor.setPosition(float("inf")) sağMotor.setPosition(float("inf")) ps = [] psNames = ["so0", "so1", "so2", "so3", "so4", "so5", "so6", "so7"] for i in range(8):
# create the Robot instance. driver = Driver() # get the time step of the current world. timestep = int(driver.getBasicTimeStep()) Max_hizi = 80 ileri_hizi = 20 fren = 0 sayici = 0 plot = 10 camera = driver.getCamera("camera") Camera.enable(camera, timestep) lms291 = driver.getLidar("Sick LMS 291") Lidar.enable(lms291, timestep) lms291_yatay = Lidar.getHorizontalResolution(lms291) fig = plt.figure(figsize=(3, 3)) # Main loop: # - perform simulation steps until Webots is stopping the controller while driver.step() != -1: Camera.getImage(camera) Camera.saveImage(camera, "camera.png", 1) frame = cv2.imread("camera.png")
def respond(result, data=None):
    """Serialise a {result, data} command dict and send it to the client."""
    cmd = {}
    cmd["result"] = result
    cmd["data"] = data
    # NOTE(review): pickle over a socket — only safe with a trusted peer.
    send_msg(pickle.dumps(cmd))


def continous_timestep():
    # Keep the simulation stepping until Webots stops the controller.
    while robot.step(timestep) != -1:
        pass


robot = Robot()
cameraRGB = Camera("cameraRGB")
cameraDepth = RangeFinder("cameraDepth")
timestep = int(robot.getBasicTimeStep())

# Server/session state.
current_task = "idle"
args = None
command_is_executing = False
print_once_flag = True
rgb_enabled = False
depth_enabled = False

# TCP command server on localhost:2001.
server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('localhost', 2001))
server.listen()
print("Waiting for connection")
robot.step(1)  # webots won't print without a step
class Vehicle:
    """Tracked vehicle wrapper: motors, rotating tower, camera, compass, DS.

    Patrol methods (line/box/bridge/arch/colour) are stubs to be filled in.
    """

    # NOTE(review): these are CLASS-level attributes shared by every
    # instance (including the mutable motors list) — confirm single-instance
    # use before instantiating twice.
    stage = 0
    MAX_SPEED = 2
    time_tmp = 0
    robot = Robot()
    motors = []
    camera = Camera('camera')
    compass = Compass('compass')
    ds = robot.getDistanceSensor('distance sensor')
    # tmp
    picked = False

    def __init__(self):
        # get motors, camera and initialise them.
        motorNames = ['left motor', 'right motor', 'tower rotational motor']
        for i in range(3):
            self.motors.append(self.robot.getMotor(motorNames[i]))
            self.motors[i].setPosition(float('inf'))
            if i <= 1:
                # drive motors: velocity control, stopped
                self.motors[i].setVelocity(0)
            else:
                # tower motor: position control at slow speed
                self.motors[i].setVelocity(0.2)
                self.motors[i].setPosition(0)
        self.camera.enable(int(self.robot.getBasicTimeStep()))
        self.compass.enable(int(self.robot.getBasicTimeStep()))
        self.ds.enable(int(self.robot.getBasicTimeStep()))

    def getStage(self):
        return self.stage

    def setStage(self, stage_num):
        self.stage = stage_num

    def getCompass(self):
        # Heading angle built from the compass x and z components.
        return np.angle(
            complex(self.compass.getValues()[0],
                    self.compass.getValues()[2]))

    def getDistanceValue(self):
        return self.ds.getValue()

    def towerSeeLeft(self):
        self.motors[2].setPosition(np.pi / 2)

    def towerSeeRight(self):
        self.motors[2].setPosition(-np.pi / 2)

    def towerRestore(self):
        self.motors[2].setPosition(0)

    def releaseFood(self):
        # release food
        pass

    # Speed setting functions
    def setSpeed(self, left, right):
        # set speed for four tracks
        self.motors[0].setVelocity(left)
        self.motors[1].setVelocity(right)

    def turnRound(self, diff):
        """PI(D) turn controller on heading error; True while still turning.

        NOTE(review): error_integral, previous_diff and ts are module-level
        globals defined elsewhere in this file.
        """
        # set speed for turning left or right
        global error_integral
        global previous_diff  # set variable as global will accelerate the speed
        error_integral += diff * ts;
        error_derivative = (previous_diff - diff) / ts;
        Vc = 0.5 * diff + 0.00 * error_derivative + 0.05 * error_integral;
        # set as 0.35/0.001/0.02 for mass=40kg
        # set as 0.5/0/0.05 respectively for mass=400kg
        if Vc > 1:
            Vc = 1
        if abs(diff) < 0.001:
            # close enough: stop turning
            self.setSpeed(0, 0)
            return False
        else:
            self.setSpeed(-Vc, Vc)
            previous_diff = diff
            return True

    def linePatrol(self):
        # get camera image, process it and set speed based on line patrolling
        # algorithm; return False if there is no line
        pass

    def boxPatrol(self):
        # get camera image and find orange box, then adjust the speed to go
        # to the box; return False if there is no box
        pass

    def bridgePatrol(self):
        # get camera image and find bridge, then adjust the speed to go to
        # the bridge; return False if there is no bridge
        pass

    def archPatrol(self):
        # get camera image and find arch, then adjust the speed to go to the
        # arch; return False if there is no arch
        pass

    def colourPatrol(self):
        # for task 5
        pass
# Wheel motors of the four-wheel base.
roda_0 = robot.getMotor("wheel1")
roda_1 = robot.getMotor("wheel2")
roda_2 = robot.getMotor("wheel3")
roda_3 = robot.getMotor("wheel4")
rodas_l = [roda_0, roda_1, roda_2, roda_3]
# Gripper finger motors.
finger_0 = robot.getMotor("finger1")
finger_1 = robot.getMotor("finger2")
# NOTE(review): joint_2, base_braço, n0, t_path, robot and timestep are
# defined earlier in this file (out of view here).
joint_list = [joint_2, finger_0, finger_1]
finger_list = [finger_0, finger_1]
wj = [rodas_l, joint_list]
c = Camera("camera")
c.enable(samplingPeriod=2000)  # slow sampling until the camera is needed
# importing the tesseract executable again
pytesseract.pytesseract.tesseract_cmd = t_path


def robot_initial():
    """Drive forward towards the tags with the camera sampling faster."""
    while robot.step(timestep) != -1:
        c.enable(samplingPeriod=100)
        # here the robot goes forward to the tags
        for r in rodas_l:
            r.setPosition(n0)
        base_braço.setPosition(1.5)
# env.environment.set_goal_position(goal_info) '''.................................''' # head + tail name pipe read_name_list = [(i + "%s.pipe" % (channel_down + 1)) for i in read_name_down] write_name_list = [(i + "%s.pipe" % (channel_down + 1)) for i in write_name_down] all_path = read_name_list + write_name_list # print(all_path) make_pipe(all_path) obs_pipe_down, touch_pipe_down, reward_pipe_down, over_pipe_down, terminal_pipe_down = open_write_pipe( write_name_list) action_pipe_down, reset_pipe_down = open_read_pipe(read_name_list) CAM_A = Camera("CAM_A") CAM_A.enable(32) CAM_A.recognitionEnable(32) CAM_B = Camera("CAM_B") CAM_B.enable(32) CAM_B.recognitionEnable(32) CAM_C = Camera("CAM_C") CAM_C.enable(32) CAM_C.recognitionEnable(32) CAM_D = Camera("CAM_D") CAM_D.enable(32) CAM_D.recognitionEnable(32) ''' ik code------------------------------------------------------------------------------------------------------ ''' #position Sensor
import matplotlib.pyplot as plt
import numpy as np
import math

TIME_STEP = 64  # ms
MAX_SPEED = 100  # km/h

driver = Driver()
speedFoward = 0.1 * MAX_SPEED  # km/h  (NOTE(review): "Foward" typo kept)
speedBrake = 0  # km/h
cont = 0
plot = 10

cameraRGB = driver.getCamera('camera')
Camera.enable(cameraRGB, TIME_STEP)
lms291 = driver.getLidar('Sick LMS 291')
print(lms291)
Lidar.enable(lms291, TIME_STEP)
lms291_width = Lidar.getHorizontalResolution(lms291)
print(lms291_width)
fig = plt.figure(figsize=(3, 3))

while driver.step() != -1:
    if cont < 1000:
        driver.setDippedBeams(True)  # headlights on
        driver.setIndicator(0)  # 0 -> OFF 1 -> Right 2 -> Left
        driver.setCruisingSpeed(speedFoward)  # throttle (speed)
"""seguidor_linha controller""" from controller import Robot from controller import Camera import cv2 import numpy as np import math import time robot = Robot() timestep = int(robot.getBasicTimeStep()) #Camera camera = Camera("camera") camera.enable(timestep) MAX_MOTORS = 16 L1 = 9 # UPPER_LEG_LENGTH [cm] L2 = 8.5 # LOWER_LEG_LENGTH [cm] M_PI = 3.14159265358979323846 gait_type = 5 i = 0 key = -1 backward = True slf = 1 slf_min = 0 slf_max = 1 stride_length_factor = [slf, slf, slf, slf] freq_min = 0.4 freq_max = 2 freq = 1.5 freq_offset = 0.2
# env.environment.set_goal_position(goal_info) '''.................................''' # head + tail name pipe read_name_list = [(i + "%s.pipe" % (channel_down + 1)) for i in read_name_down] write_name_list = [(i + "%s.pipe" % (channel_down + 1)) for i in write_name_down] all_path = read_name_list + write_name_list # print(all_path) make_pipe(all_path) obs_pipe_down, touch_pipe_down, reward_pipe_down, over_pipe_down, terminal_pipe_down = open_write_pipe( write_name_list) action_pipe_down, reset_pipe_down = open_read_pipe(read_name_list) CAM_A = Camera("CAM_A") CAM_A.enable(32) CAM_A.recognitionEnable(32) ''' initial_obs, initial_state = initial_step() write_to_pipe([obs_pipe, touch_pipe], [initial_obs, initial_state]) print(np.array(initial_obs).shape, initial_state) ''' initial_state = initial_step() print("init_state: {}".format(initial_state)) write_to_pipe(touch_pipe, initial_state) class TaskType(Enum): UP = 'up_dopamine' DOWN = 'down_dopamine'
MAX_SPEED = 6.28 # maximal value returned by the sensors MAX_SENSOR_VALUE = 1024 # minimal distance, in meters, for an obstacle to be considered MIN_DISTANCE = 1.0 # create the robot instance robot = Robot() # inicializa kinect kinectColor = robot.getCamera('kinect color') kinectDepth = robot.getRangeFinder('kinect range') print(kinectColor) Camera.enable(kinectColor, TIME_STEP) RangeFinder.enable(kinectDepth, TIME_STEP) # get a handler to the motors and set target position to infinity (speed control) leftMotorFront = robot.getMotor('front left wheel') rightMotorFront = robot.getMotor('front right wheel') leftMotorBack = robot.getMotor('back left wheel') rightMotorBack = robot.getMotor('back right wheel') leftMotorFront.setPosition(float('inf')) rightMotorFront.setPosition(float('inf')) leftMotorBack.setPosition(float('inf')) rightMotorBack.setPosition(float('inf')) # initialize devices ps = []
print("I am next to trash") # this function same as above function. It focuses the bin. def foundbin(k, l): print("bin is detected") robot.setSpeed(k, l) # same as "foundtrash" method. if ((k >= 190 and l >= 190) and (left7 >= 140 or right0 >= 140) and (red[20] >= 100 and green[20] <= 10 and blue[20] <= 10)): led[0].set(0) print(" I am next to bin") # enable camera. camera = Camera("camera") camera.enable(timestep * 2) print("Camera width = ", camera.getWidth(), "Camera height =", camera.getHeight()) # enable LEDs led = [0] * 8 count = 0 for i in range(8): name = "led" + str(i) led[i] = LED(name) robot.enableEncoders(timestep) # enable distance and ground sensors irLeft7 = DistanceSensor("ps7") irLeft6 = DistanceSensor("ps6")