Example #1
    def __init__(self,
                 timesteps=32,
                 gamma=0.99,
                 epsilon=1.0,
                 epsilon_min=0.01,
                 epsilon_log_decay=0.99,
                 alpha=0.01):
        self.supervisor = Supervisor()
        self.robot_node = self.supervisor.getFromDef("MY_BOT")
        if self.robot_node is None:
            sys.stderr.write(
                "No DEF MY_ROBOT node found in the current world file\n")
            sys.exit(1)
        self.trans_field = self.robot_node.getField("translation")
        self.rot_field = self.robot_node.getField("rotation")
        self.timestep = timesteps

        self.camera = Camera('camera')
        self.camera.enable(self.timestep)
        self.init_image = self.get_image()

        self.receiver = Receiver('receiver')
        self.receiver.enable(self.timestep)
        self.emitter = Emitter('emitter')

        self.memory = deque(maxlen=50000)
        self.batch_size = 128
        self.alpha = alpha
        self.gamma = gamma
        self.epsilon_init = epsilon
        self.epsilon_min = epsilon_min
        self.epsilon_decay = epsilon_log_decay

        self.pre_state = self.init_image
        self.pre_action = -1

        self.pre_go_straight = False
        self.reward = 0
        self.step = 0
        self.max_step = 200
        self.file = None

        # interactive
        self.feedbackProbability = 0
        self.feedbackAccuracy = 1
        self.PPR = False
        self.feedbackTotal = 0
        self.feedbackAmount = 0

        self.init_model()
        self.init_parametter()
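
    # Hypothetical helper, not in the original snippet: one way the stored
    # epsilon_init / epsilon_min / epsilon_decay values could drive a logarithmic
    # exploration decay (assumes `import math` at the top of the controller).
    # The real schedule presumably lives in init_parametter(), which is not shown.
    def get_epsilon_sketch(self, t):
        return max(self.epsilon_min,
                   min(self.epsilon_init,
                       1.0 - math.log10((t + 1) * self.epsilon_decay)))
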
    def setupDevice(self):
        self.leftMotor = self.robot.getDevice('left wheel motor')
        self.rightMotor = self.robot.getDevice('right wheel motor')
        self.leftMotor.setPosition(float('inf'))
        self.rightMotor.setPosition(float('inf'))

        self.rightDistanceSensor = self.robot.getDevice('ds1')
        self.leftDistanceSensor = self.robot.getDevice('ds0')
        self.rightDistanceSensor.enable(self.timestep)
        self.leftDistanceSensor.enable(self.timestep)

        self.gps = self.robot.getDevice('gps')
        self.touchSensor1 = self.robot.getDevice('touch_sensor1')
        self.touchSensor2 = self.robot.getDevice('touch_sensor2')
        self.touchSensor3 = self.robot.getDevice('touch_sensor3')
        self.touchSensor4 = self.robot.getDevice('touch_sensor4')
        self.touchSensor5 = self.robot.getDevice('touch_sensor5')
        self.gps.enable(self.timestep)
        self.touchSensor1.enable(self.timestep)
        self.touchSensor2.enable(self.timestep)
        self.touchSensor3.enable(self.timestep)
        self.touchSensor4.enable(self.timestep)
        self.touchSensor5.enable(self.timestep)

        self.camera = Camera('camera')
        self.camera.enable(self.timestep)

        self.leftMotor.setVelocity(0)
        self.rightMotor.setVelocity(0)

        self.init_leftValue = self.leftDistanceSensor.getValue()
        self.init_rightValue = self.rightDistanceSensor.getValue()

        self.receiver = Receiver('receiver')
        self.emitter = Emitter('emitter')
        self.receiver.enable(self.timestep)

rospy.init_node('camera_test_node')
robot = Robot()
global camPub
global rangePub
camPub = rospy.Publisher('/camera/image', Image, queue_size=20)
rangePub = rospy.Publisher('/range_finder/image', Image, queue_size=20)
# get the time step of the current world.
timestep = int(robot.getBasicTimeStep())
SAMPLE_TIME = 100
camera = robot.getDevice('camera')
depth = robot.getDevice('range-finder')
global rscam
global depthcam
rscam = Camera('camera')
depthcam = RangeFinder('range-finder')
depthcam.enable(SAMPLE_TIME)
rscam.enable(SAMPLE_TIME)
rospy.Subscriber('publish_images', Bool, camera_CB)

clockPublisher = rospy.Publisher('clock', Clock, queue_size=1)
if not rospy.get_param('use_sim_time', False):
    rospy.logwarn('use_sim_time is not set!')

# Main loop:
# - perform simulation steps until Webots is stopping the controller
while robot.step(timestep) != -1 and not rospy.is_shutdown():
    msg = Clock()
    time = robot.getTime()
    msg.clock.secs = int(time)
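    # a sketch of how this loop is typically finished (the snippet is cut off here):
    # fill in the nanoseconds field and publish the simulated clock
    msg.clock.nsecs = int((time - int(time)) * 1.0e9)
    clockPublisher.publish(msg)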
# env.environment.set_goal_position(goal_info)
'''.................................'''

# head + tail name pipe
read_name_list = [(i + "%s.pipe" % (channel_down + 1)) for i in read_name_down]
write_name_list = [(i + "%s.pipe" % (channel_down + 1))
                   for i in write_name_down]
all_path = read_name_list + write_name_list
# print(all_path)
make_pipe(all_path)

obs_pipe_down, touch_pipe_down, reward_pipe_down, over_pipe_down, terminal_pipe_down = open_write_pipe(
    write_name_list)
action_pipe_down, reset_pipe_down = open_read_pipe(read_name_list)

CAM_A = Camera("CAM_A")
CAM_A.enable(32)
CAM_A.recognitionEnable(32)
CAM_B = Camera("CAM_B")
CAM_B.enable(32)
CAM_B.recognitionEnable(32)
CAM_C = Camera("CAM_C")
CAM_C.enable(32)
CAM_C.recognitionEnable(32)
CAM_D = Camera("CAM_D")
CAM_D.enable(32)
CAM_D.recognitionEnable(32)
'''
ik code------------------------------------------------------------------------------------------------------
'''
#position Sensor
Example #5
                       'LegLowerR', 'LegLowerL', 'AnkleR', 'AnkleL',
                       'FootR', 'FootL', 'Neck', 'Head')
# List of position sensor devices.
positionSensors = []

# Create the Robot instance.
robot = Robot()
robot.getSupervisor()


basicTimeStep = int(robot.getBasicTimeStep())
# print(robot.getDevice("camera"))
camera1=robot.getCamera("Camera")
print(camera1)
# camera= Camera(camera1)
camera = Camera('Camera')
# print(robot.getCamera('Camera'))
# camera.wb_camera_enable()
mTimeStep = basicTimeStep
camera.enable(int(mTimeStep))
camera.getSamplingPeriod()
# width=camera.getWidth()
# height=camera.getHeight()
firstimage=camera.getImage()
ori_width = int(4 * 160)  # original image is 640x480
ori_height = int(3 * 160)
r_width = int(4 * 20)  # downscaled to 80x60 during processing to speed it up; change with caution!
r_height = int(3 * 20)
color_range = {'yellow_door': [(10, 43, 46), (34, 255, 255)],
               'red_floor1': [(0, 43, 46), (10, 255, 255)],
               'red_floor2': [(156, 43, 46), (180, 255, 255)],
Example #6
class Vehicle:
    stage = 0
    MAX_SPEED = 2
    time_tmp = 0
    robot = Robot()
    motors = []
    camera = Camera('camera')
    compass = Compass('compass')
    ds = robot.getDistanceSensor('distance sensor')

    #tmp
    picked = False

    def __init__(self):
        #get motors, camera and initialise them.
        motorNames = ['left motor', 'right motor', 'tower rotational motor']
        for i in range(3):
            self.motors.append(self.robot.getMotor(motorNames[i]))
            self.motors[i].setPosition(float('inf'))
            if i <= 1:
                self.motors[i].setVelocity(0)
            else:
                self.motors[i].setVelocity(0.2)
                self.motors[i].setPosition(0)
        
        self.camera.enable(int(self.robot.getBasicTimeStep()))
        self.compass.enable(int(self.robot.getBasicTimeStep()))
        self.ds.enable(int(self.robot.getBasicTimeStep()))

        
    def getStage(self):
        return self.stage
    def setStage(self, stage_num):
        self.stage = stage_num
    def getCompass(self):
        return np.angle(complex(self.compass.getValues()[0], self.compass.getValues()[2]))
    def getDistanceValue(self):
        return self.ds.getValue()
    def towerSeeLeft(self):
        self.motors[2].setPosition(np.pi / 2)
    def towerSeeRight(self):
        self.motors[2].setPosition(-np.pi / 2)
    def towerRestore(self):
        self.motors[2].setPosition(0)
    def releaseFood(self):
        #release food
        pass
            
        
        
#Speed setting functions
    def setSpeed(self, left, right):
        #set speed for four tracks
        self.motors[0].setVelocity(left)
        self.motors[1].setVelocity(right)
    def turnRound(self, diff):
        #set speed for turning left or right
        global error_integral
        global previous_diff  # PID state lives at module level so it persists between calls
        error_integral += diff * ts
        error_derivative = (previous_diff - diff) / ts
        Vc = 0.5 * diff + 0.00 * error_derivative + 0.05 * error_integral
        #set as 0.35/0.001/0.02 for mass=40kg
        #set as 0.5/0/0.05 respectively for mass=400kg
        if Vc > 1:
            Vc = 1
        if abs(diff) < 0.001:
            self.setSpeed(0, 0)
            return False
        else:
            self.setSpeed(-Vc, Vc)
            previous_diff = diff
            return True

    def linePatrol(self):
        #get camera image, process it and set speed based on line patrolling algorithm
        #return False if there is no line
        pass
    def boxPatrol(self):
        #get camera image and find orange box, then adjust the speed to go to the box
        #return False if there is no box
        pass
    def bridgePatrol(self):
        #get camera image and find bridge, then adjust the speed to go to the bridge
        #return False if there is no bridge
        pass
    def archPatrol(self):
        #get camera image and find arch, then adjust the speed to go to the arch
        #return False if there is no arch
        pass
    def colourPatrol(self):
        #for task 5
        pass
Example #7
"""data_collector controller."""
import random
import time
import csv
import numpy as np
from controller import Robot, Camera, PositionSensor

# create the Robot instance.
robot = Robot()

# get the time step of the current world.
timestep = int(robot.getBasicTimeStep())

# get and enable Camera
cameraTop = Camera("CameraTop")
cameraTop.enable(timestep)

print("Width: ", cameraTop.getWidth())
print("Height: ", cameraTop.getHeight())
#cameraTop.saveImage("cameraImg.png", 50)

cameraBottom = Camera("CameraBottom")
cameraBottom.enable(timestep)

#Move head in right position
#HeadYaw = robot.getMotor("HeadYaw")
#HeadPitch = robot.getMotor("HeadPitch")

#HeadYaw.setPosition(0.5)
#HeadYaw.setVelocity(1.0)
#HeadPitch.setPosition(0.2)
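
# A small sketch (not part of the original data_collector controller): how the
# enabled camera's image could be pulled into numpy for collection. Webots
# getImage() returns a BGRA byte buffer, so it is reshaped to height x width x 4.
while robot.step(timestep) != -1:
    raw = cameraTop.getImage()
    frame = np.frombuffer(raw, np.uint8).reshape(
        (cameraTop.getHeight(), cameraTop.getWidth(), 4))
    break  # one frame is enough for this sketch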
Example #8
    robot.getMotor('leg5_servo1'),
    robot.getMotor('leg5_servo2'),
]
#  ds = robot.getDistanceSensor('dsname')
#  ds.enable(timestep)

def move_forward():
    motor_lst[1 + 0*3].setPosition(math.pi * -1 / 8)
    motor_lst[1 + 0*3].setVelocity(1.0)
    
def rotate(angle):
    for i in range(6):
        motor_lst[0 + i*3].setPosition(angle)
        motor_lst[0 + i*3].setVelocity(1.0)

camera = Camera("camera_d435i")
camera.enable(15)
print(camera.getSamplingPeriod())
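# note: saveImage() will likely not expand "~"; an absolute path (or os.path.expanduser) is probably needed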
camera.saveImage("~/test.png", 100)

gyro = Gyro("gyro")
gyro.enable(60)

inertial_unit = InertialUnit("inertial_unit")
inertial_unit.enable(60)

# Main loop:
# - perform simulation steps until Webots is stopping the controller
def default_low_pos():
    for i in range(6):
        motor_lst[0 + i*3].setPosition(0)
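
# A hedged sketch of the main loop the comment above refers to; it assumes `robot`
# and `timestep` are defined earlier in this (truncated) controller.
while robot.step(timestep) != -1:
    move_forward()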
Example #9
from controller import Robot, Motor, DistanceSensor
from ikpy.chain import Chain
import numpy as np
import time
from controller import Camera, Device, CameraRecognitionObject

robot = Robot()

camera = Camera("camera")

camera.enable(20)

#firstObject = Camera.getRecognitionObjects()[0]
#id = firstObject.get_id()
#position = firstObject.get_position()
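
# A sketch of how the commented-out recognition code above could be made to work:
# recognition must be enabled on the camera instance and objects read inside the
# step loop; the list can be empty, so it is checked before indexing.
camera.recognitionEnable(20)
timestep = int(robot.getBasicTimeStep())
while robot.step(timestep) != -1:
    objects = camera.getRecognitionObjects()
    if objects:
        first = objects[0]
        print(first.get_id(), first.get_position())
        break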
Example #10
class Vehicle:
    stage = 0
    MAX_SPEED = 2
    robot = Robot()
    motors = []
    armMotors = []
    armSensors = []
    armPs = []
    camera = Camera('camera')
    compass = Compass('compass')
    ds = robot.getDistanceSensor('distance sensor')
    ds2 = robot.getDistanceSensor('distance sensor2')

    HEIGHT = camera.getHeight()
    WIDTH = camera.getWidth()
    foodStage = 0
    p0 = WIDTH / 2
    leftSpeed = 0.0
    rightSpeed = 0.0
    leftSum = 0
    rightSum = 0
    HALF = int(WIDTH / 2)
    frame = np.zeros((HEIGHT, WIDTH))
    blur_im = np.zeros((HEIGHT, WIDTH))
    position = WIDTH / 2

    def __init__(self):
        #get motors, camera and initialise them.
        motorNames = ['left motor', 'right motor',
                      'tower rotational motor']  #, 'trigger motor']
        for i in range(3):
            self.motors.append(self.robot.getMotor(motorNames[i]))
            self.motors[i].setPosition(float('inf'))
            if i <= 1:
                self.motors[i].setVelocity(0)
            else:
                self.motors[i].setVelocity(0.8)
                self.motors[i].setPosition(0)

        armMotorNames = [
            "arm_motor_1", "arm_motor_2", "arm_motor_3", "arm_motor_4",
            "arm_motor_5", "arm_motor_6", "arm_motor_7", "arm_motor_8"
        ]
        for i in range(len(armMotorNames)):
            self.armMotors.append(self.robot.getMotor(armMotorNames[i]))
            self.armMotors[i].setPosition(0)
            self.armMotors[i].setVelocity(0.3)

        armSensorNames = [
            "arm_position_sensor_1", "arm_position_sensor_2",
            "arm_position_sensor_3", "arm_position_sensor_4",
            "arm_position_sensor_5", "arm_position_sensor_6",
            "arm_position_sensor_7", "arm_position_sensor_8"
        ]
        for i in range(len(armSensorNames)):
            self.armSensors.append(
                self.robot.getPositionSensor(armSensorNames[i]))
            self.armSensors[i].enable(int(self.robot.getBasicTimeStep()))

        self.camera.enable(int(self.robot.getBasicTimeStep()))
        self.compass.enable(int(self.robot.getBasicTimeStep()))
        self.ds.enable(int(self.robot.getBasicTimeStep()))
        self.ds2.enable(int(self.robot.getBasicTimeStep()))

    def getStage(self):
        return self.stage

    def setStage(self, stage_num):
        self.stage = stage_num

    def getCompass(self):
        return np.angle(
            complex(self.compass.getValues()[0],
                    self.compass.getValues()[2]))

    def getDistanceValue(self, input_ds=1):
        if input_ds == 1:
            return self.ds.getValue()
        if input_ds == 2:
            return self.ds2.getValue()

    def towerSeeLeft(self):
        self.motors[2].setPosition(np.pi / 2)

    @staticmethod
    def towerSeeRight():
        Vehicle.motors[2].setPosition(-np.pi / 2)

    def towerRestore(self):
        self.motors[2].setPosition(0)

    def pickFood(self):
        if self.foodStage == 0:
            self.armMotors[0].setPosition(0.039)  #upper-arm extend
            self.armMotors[1].setPosition(0.93)  #upper-arm rotate
            if (self.armSensors[1].getValue() > 0.92):
                self.armMotors[2].setPosition(0.038)  #mid-arm extend
                #print(sensor3.getValue())
                if (self.armSensors[2].getValue() > 0.0379):
                    self.armMotors[4].setPosition(0.005)  #grab the box
                    self.armMotors[5].setPosition(0.005)
                    if ((self.armSensors[4].getValue() >= 0.00269)
                            or (self.armSensors[5].getValue() >= 0.0031)):
                        self.foodStage = 1
        elif self.foodStage == 1:
            self.armMotors[2].setPosition(0)  #raise the box
            self.armMotors[2].setVelocity(.1)
            self.armMotors[1].setPosition(0)
            self.armMotors[6].setPosition(0.9)
            if (self.armSensors[1].getValue() < 0.002):
                self.armMotors[0].setPosition(0)
                self.armMotors[0].setVelocity(0.01)  #grabbing task finished
                if (self.armSensors[0].getValue() < 0.003):
                    return True
        return False

    def releaseFood(self):
        if (self.armSensors[6].getValue() > 0.899):
            self.armMotors[3].setPosition(0.06)
            self.armMotors[7].setPosition(0.06)
            if ((self.armSensors[3].getValue() > 0.059)
                    and (self.armSensors[7].getValue() > 0.059)):
                self.armMotors[4].setPosition(0)
                self.armMotors[5].setPosition(0)
                if ((self.armSensors[4].getValue() <= 0.00269)
                        or (self.armSensors[5].getValue() >= 0.0031)):
                    self.armMotors[3].setPosition(0)
                    self.armMotors[7].setPosition(0)
                    self.armMotors[6].setPosition(0)
                    return True

#Img Processing related

    @staticmethod
    def get_frame():
        HEIGHT = Vehicle.HEIGHT
        WIDTH = Vehicle.WIDTH
        camera = Vehicle.camera
        cameraData = camera.getImage()

        # get image and process it
        frame = np.zeros((HEIGHT, WIDTH))
        for x in range(0, Vehicle.WIDTH):
            for y in range(0, HEIGHT):
                gray = int(camera.imageGetGray(cameraData, WIDTH, x, y))
                frame[y][x] = gray

        return frame
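
    # An alternative sketch (not in the original class): the per-pixel Python loop
    # above is slow, so the same grayscale frame can likely be built with numpy,
    # assuming the usual Webots BGRA byte layout from getImage().
    @staticmethod
    def get_frame_fast():
        camera = Vehicle.camera
        bgra = np.frombuffer(camera.getImage(), np.uint8).reshape(
            (Vehicle.HEIGHT, Vehicle.WIDTH, 4))
        # average of the blue, green and red channels as a grayscale approximation
        return bgra[:, :, :3].mean(axis=2)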

    @staticmethod
    def edge_detect(frame,
                    threshold=254,
                    maxVal=255,
                    kernel_1=20,
                    kernel_2=15):
        # threshold
        # Edge_LineTracking_T1:254; Edge_Bridge_detection_T3:180; Edge_Arch_detection_T4:100;Edge_Color_detect_T5:254
        # maxVal
        # Edge_LineTracking_T1:255; Edge_Bridge_detection_T3:255; Edge_Arch_detection_T4:255;Edge_Color_detect_T5:255
        # kernel_1
        # Edge_LineTracking_T1:20; Edge_Bridge_detection_T3:20; Edge_Arch_detection_T4:20;Edge_Color_detect_T5:20
        # kernel_2
        # Edge_LineTracking_T1:15; Edge_Bridge_detection_T3:15; Edge_Arch_detection_T4:15;Edge_Color_detect_T5:15
        # edge_detect Sobel
        x = cv2.Sobel(frame, cv2.CV_16S, 1, 0)
        y = cv2.Sobel(frame, cv2.CV_16S, 0, 1)
        absx = cv2.convertScaleAbs(x)
        absy = cv2.convertScaleAbs(y)
        dst = cv2.addWeighted(absx, 0.5, absy, 0.5, 0)
        #cv2.imwrite("dst.jpg", dst)
        # to binary
        ret, binary = cv2.threshold(dst, threshold, maxVal, cv2.THRESH_BINARY)
        #cv2.imwrite("binary.jpg", binary)
        # Smooth
        kernel1 = np.ones((kernel_1, kernel_1), float) / 25
        smooth = cv2.filter2D(binary, -1, kernel1)
        # blur
        blur_im = cv2.boxFilter(smooth, -1, (kernel_2, kernel_2), normalize=1)
        #cv2.imwrite("blur.jpg", blur_im)
        return blur_im

    @staticmethod
    def positioning(blur_im,
                    type_select=0,
                    height_per_1=0.7,
                    height_per_2=0.8):
        # height_per_1
        # Edge_LineTracking_T1:0.7; Edge_Bridge_detection_T3:0.3; Edge_Arch_detection_T4:0.3;Edge_Color_detect_T5:0.6
        # height_per_2
        # Edge_LineTracking_T1:0.8; Edge_Bridge_detection_T3:0.6; Edge_Arch_detection_T4:0.8;Edge_Color_detect_T5:0.7
        HEIGHT = Vehicle.HEIGHT
        WIDTH = Vehicle.WIDTH
        HALF = Vehicle.HALF
        axis = 0
        num = 0
        position = 0
        axis_l = 0
        axis_r = 0
        num_l = 0
        num_r = 0
        position_l = 0
        position_r = WIDTH - 1
        if type_select == 0:
            for axis_v in range(int(HEIGHT * height_per_1),
                                int(HEIGHT * height_per_2)):
                for axis_h in range(WIDTH * 0, WIDTH):
                    if blur_im[axis_v][axis_h] != 0:
                        axis = axis + axis_h
                        num = num + 1
            if num:
                position = axis / num + 1

        elif type_select == 1:
            for axis_v in range(int(HEIGHT * height_per_1),
                                int(HEIGHT * height_per_2)):
                for axis_h in range(WIDTH * 0, HALF):
                    if blur_im[axis_v][axis_h] != 0:
                        axis_l = axis_l + axis_h
                        num_l = num_l + 1
                    if blur_im[axis_v][axis_h + HALF] != 0:
                        axis_r = axis_r + axis_h + HALF
                        num_r = num_r + 1
            if num_l:
                position_l = axis_l / num_l + 1
            if num_r:
                position_r = axis_r / num_r + 1
            position = (position_l + position_r) / 2

        else:
            print('INPUT ERROR!:positioning:type_select')
        return position

    @staticmethod
    def steering(position,
                 type_select=0,
                 rectify_pixel=0,
                 base_speed=3.0,
                 straight_speed=2.0):
        # rectify_pixel
        # Edge_LineTracking_T1:7; Edge_Bridge_detection_T3:5; Edge_Arch_detection_T4:5;Edge_Color_detect_T5:3
        # base_speed
        # Edge_LineTracking_T1:3; Edge_Bridge_detection_T3:5; Edge_Arch_detection_T4:5;Edge_Color_detect_T5:20
        # straight_speed
        # Edge_LineTracking_T1:2; Edge_Bridge_detection_T3:3; Edge_Arch_detection_T4:2;Edge_Color_detect_T5:2
        WIDTH = Vehicle.WIDTH
        if type_select == 0:
            if abs(position - WIDTH / 2) > rectify_pixel:
                leftSpeed = (position / WIDTH) * base_speed
                rightSpeed = (1 - position / WIDTH) * base_speed
            else:
                leftSpeed = straight_speed
                rightSpeed = straight_speed
        elif type_select == 1:
            if abs(position - WIDTH / 2) > rectify_pixel:
                leftSpeed = (position / WIDTH - 0.5) * base_speed
                rightSpeed = (0.5 - position / WIDTH) * base_speed
            else:
                leftSpeed = straight_speed
                rightSpeed = straight_speed
        else:
            print('INPUT ERROR!:steering:type_select')
        return leftSpeed, rightSpeed


#Speed setting functions

    def setSpeed(self, left, right):
        #set speed for four tracks
        self.motors[0].setVelocity(left)
        self.motors[1].setVelocity(right)

    def turnRound(self, diff):
        #set speed for turning left or right
        if abs(diff) < 0.001:
            self.setSpeed(0, 0)
            return False
        else:
            self.setSpeed(-0.3 * diff, 0.3 * diff)
            return True

    def linePatrol(self):
        #get camera image, process it and set speed based on line patrolling algorithm
        #return False if there is no line
        image = self.camera.getImage()
        leftSum = 0
        rightSum = 0
        leftSpeed = 0
        rightSpeed = 0
        cameraData = self.camera.getImage()
        HEIGHT = self.camera.getHeight()
        WIDTH = self.camera.getWidth()
        frame = np.zeros((HEIGHT, WIDTH))
        for y in range(0, HEIGHT):
            for x in range(0, WIDTH):
                frame[y][x] = int(
                    self.camera.imageGetGray(cameraData, WIDTH, x, y))
        absX = cv2.convertScaleAbs(cv2.Sobel(frame, cv2.CV_16S, 1, 0))
        absY = cv2.convertScaleAbs(cv2.Sobel(frame, cv2.CV_16S, 0, 1))
        # to binary
        ret, binary = cv2.threshold(cv2.addWeighted(absX, 0.5, absY, 0.5, 0),
                                    254, 255, cv2.THRESH_BINARY)
        binaryImg = cv2.boxFilter(cv2.filter2D(
            binary, -1,
            np.ones((20, 20), float) / 25),
                                  -1, (15, 15),
                                  normalize=1)

        positionSum = 0
        positionCount = 0
        for i in range(int(HEIGHT * 0.75), HEIGHT):
            for j in range(WIDTH * 0, WIDTH):
                if binaryImg[i][j] != 0:
                    positionSum += j
                    positionCount += 1
        farpositionSum = 0
        farpositionCount = 0
        for i in range(int(HEIGHT * 0.32), int(HEIGHT * 0.44)):
            for j in range(WIDTH * 0, WIDTH):
                if binaryImg[i][j] != 0:
                    farpositionSum += j
                    farpositionCount += 1
        if farpositionCount == 0:
            farcenter = 0
        else:
            farcenter = farpositionSum / farpositionCount
        if abs(farcenter - WIDTH / 2) < 0.1:
            self.motors[0].setAcceleration(0.3)
            self.motors[1].setAcceleration(0.3)
            #("straight")
        else:
            self.motors[0].setAcceleration(20)
            self.motors[1].setAcceleration(20)
            #print("not straight")
        if positionCount or farpositionCount:
            if positionCount == 0:
                center = 80
            else:
                center = positionSum / positionCount
            diff = 4 * (0.5 - center / WIDTH)
            if diff > 0.01:
                leftSpeed = (1 - 0.6 * diff) * self.MAX_SPEED
                rightSpeed = (1 - 0.3 * diff) * self.MAX_SPEED
            elif diff < -0.01:
                leftSpeed = (1 + 0.3 * diff) * self.MAX_SPEED
                rightSpeed = (1 + 0.6 * diff) * self.MAX_SPEED
            else:
                leftSpeed = self.MAX_SPEED
                rightSpeed = self.MAX_SPEED

            self.setSpeed(leftSpeed, rightSpeed)
            return True
        else:
            return False

    @staticmethod
    def linePatrol2():
        # Task 1
        # get_frame
        frame = Vehicle.get_frame()
        # edge_detect
        blur_im = Vehicle.edge_detect(frame, 100, 255, 20, 15)
        # positioning
        position = Vehicle.positioning(blur_im, 0, 0.7, 0.8)
        # steering
        leftSpeed, rightSpeed = Vehicle.steering(position, 0, 4, 0.8, 0.8)

        return leftSpeed, rightSpeed

    @staticmethod
    def box_found():
        # Task 2
        HALF = Vehicle.HALF
        frame = Vehicle.get_frame()
        x = cv2.Sobel(frame, cv2.CV_16S, 1, 0)
        y = cv2.Sobel(frame, cv2.CV_16S, 0, 1)
        absx = cv2.convertScaleAbs(x)
        absy = cv2.convertScaleAbs(y)
        dst = cv2.addWeighted(absx, 0.5, absy, 0.5, 0)
        cv2.imwrite("dst.jpg", dst)
        # to binary
        ret, filtered = cv2.threshold(dst, 50, 255, cv2.THRESH_TOZERO)
        cv2.imwrite("binary.jpg", filtered)
        position_i = Vehicle.positioning(filtered, 0, 0.32, 0.35)
        if abs(position_i - HALF) < 10:
            position = Vehicle.positioning(filtered, 0, 0.25, 0.3)
            # predictive check so the robot stops exactly at the box center
            if abs(position - HALF) < 10:
                return True
            else:
                pass

    @staticmethod
    def bridge_found():
        # Task 3
        HALF = Vehicle.HALF
        WIDTH = Vehicle.WIDTH
        HEIGHT = Vehicle.HEIGHT
        count = 0
        num1 = 0
        num = 0
        axis = WIDTH
        position_i = 0
        frame = Vehicle.get_frame()
        x = cv2.Sobel(frame, cv2.CV_16S, 1, 0)
        y = cv2.Sobel(frame, cv2.CV_16S, 0, 1)
        absx = cv2.convertScaleAbs(x)
        absy = cv2.convertScaleAbs(y)
        dst = cv2.addWeighted(absx, 0.5, absy, 0.5, 0)
        # cv2.imwrite("dst.jpg", dst)
        # to binary
        ret, filtered = cv2.threshold(dst, 50, 255, cv2.THRESH_TOZERO)
        # cv2.imwrite("filtered.jpg", filtered)

        for axis_v in range(int(HEIGHT * 0.8), int(HEIGHT * 0.87)):
            for axis_h in range(HALF, WIDTH):
                if filtered[axis_v][axis_h] != 0:
                    axis = axis + axis_h
                    num1 = num1 + 1
        if num1:
            position_i = axis / num1 + 1
        if abs(position_i - 1.5 * HALF) < 10:
            # print('pi = ', position_i)
            for axis_v in range(int(HEIGHT * 0.82), int(HEIGHT * 0.85)):
                for axis_h in range(WIDTH * 0, int(WIDTH * 1)):
                    if filtered[axis_v][axis_h] != 0:
                        axis_i = axis_h
                        axis = min(axis_i, axis)
            # print('axis = ', axis)
            if abs(axis - 0.5 * WIDTH) < 5:
                return True
            else:
                return False

    def arch_found(self, a, b):
        # Task 4
        global count_arch
        HEIGHT = Vehicle.HEIGHT
        WIDTH = Vehicle.WIDTH
        camera = Vehicle.camera
        cameraData = camera.getImage()
        frame = np.zeros((HEIGHT, WIDTH))
        Q = 0

        position = 0
        for x in range(0, WIDTH):
            for y in range(0, HEIGHT):
                gray = int(camera.imageGetGray(cameraData, WIDTH, x, y))
                if 110 < gray < 130:
                    frame[y][x] = 255
                    Q = Q + 1
        #print(i)
        # cv2.imwrite('frame.jpg', frame)
        #blur_im = self.edge_detect(frame, 254, 255)
        #cv2.imwrite('blur.jpg', blur_im)
        if Q > 50:
            position = self.positioning(frame, 0, a, b)
            #print(i, position)
            if count_arch == 0:
                if int(position) < WIDTH / 2:
                    count_arch = 1
                    for i in range(2):
                        self.motors[i].setVelocity(0)
                    return 1
            else:
                if int(position) > WIDTH / 2:
                    for i in range(2):
                        self.motors[i].setVelocity(0)
                    return 1
                else:
                    return 0

        else:
            return 0

    def count(self):
        HEIGHT = Vehicle.HEIGHT
        WIDTH = Vehicle.WIDTH
        camera = Vehicle.camera
        cameraData = camera.getImage()
        i = 0
        for x in range(0, WIDTH):
            for y in range(0, HEIGHT):
                gray = int(camera.imageGetGray(cameraData, WIDTH, x, y))
                if 110 < gray < 130:
                    i = i + 1
        #print(i)
        return i

    @staticmethod
    def colourPatrol():
        # Task 5
        HEIGHT = Vehicle.HEIGHT
        WIDTH = Vehicle.WIDTH
        camera = Vehicle.camera
        cameraData = camera.getImage()
        frame = np.zeros((HEIGHT, WIDTH))
        color_kernel = np.zeros((3, 3))
        compass = Vehicle.getCompass(Vehicle)
        position = HEIGHT / 2
        leftSpeed = 0
        rightSpeed = 0
        global flag
        if (flag == -1) and (3.12 < abs(compass) < 3.15):
            for x in range(0, 3):
                for y in range(0, 3):
                    gray = int(
                        camera.imageGetGray(cameraData, WIDTH,
                                            x + int(0.5 * WIDTH),
                                            y + int(0.80 * HEIGHT)))
                    color_kernel[y][x] = gray

            gray_average = np.mean(color_kernel)
            # color classification
            if 100 < gray_average < 130:
                flag = 0  # red
            elif 175 < gray_average < 200:
                flag = 1  # yellow
            elif 140 < gray_average < 170:
                flag = 2  # purple
            else:
                pass
            # print('color flag=', flag)
            leftSpeed = 2.0
            rightSpeed = 2.0

        elif flag == 0 or flag == 1 or flag == 2:
            for x in range(0, 3):
                for y in range(0, 3):
                    gray = int(
                        camera.imageGetGray(cameraData, WIDTH,
                                            x + int(0.5 * WIDTH),
                                            y + int(0.95 * HEIGHT)))
                    color_kernel[y][x] = gray
            gray_average = np.mean(color_kernel)
            if 90 < gray_average < 100:
                flag = 3  # finished

            for y in range(0, HEIGHT):
                for x in range(0, WIDTH):
                    gray = int(camera.imageGetGray(cameraData, WIDTH, x, y))
                    # color classification
                    if flag == 0:
                        if 100 < gray < 130:
                            frame[y][x] = 255
                        else:
                            pass
                    elif flag == 1:
                        if 175 < gray < 200:
                            frame[y][x] = 255
                        else:
                            pass
                    elif flag == 2:
                        if 140 < gray < 170:
                            frame[y][x] = 255
                        else:
                            pass

            # edge_detect Sobel
            x = cv2.Sobel(frame, cv2.CV_16S, 1, 0)
            y = cv2.Sobel(frame, cv2.CV_16S, 0, 1)
            absX = cv2.convertScaleAbs(x)
            absY = cv2.convertScaleAbs(y)
            dst = cv2.addWeighted(absX, 0.5, absY, 0.5, 0)
            # to binary
            ret, binary = cv2.threshold(dst, 254, 255, cv2.THRESH_BINARY)
            # Smooth
            kernel1 = np.ones((20, 20), float) / 25
            smooth = cv2.filter2D(binary, -1, kernel1)
            # blur
            blur_im1 = cv2.boxFilter(smooth, -1, (15, 15), normalize=1)

            # cv2.imwrite('smooth_blur.jpg', blur_im1)
            # cv2.imwrite('gray.jpg', frame)
            # cv2.imwrite('frame.jpg', frame)
            # video.write(blur_im1)

            # Locate
            axis = 0
            num = 0
            for axis_v in range(int(HEIGHT * 0.8), int(HEIGHT * 0.9)):
                for axis_h in range(WIDTH * 0, WIDTH):
                    if blur_im1[axis_v][axis_h] != 0:
                        axis = axis + axis_h
                        num = num + 1
            if num:
                position = axis / num + 1
            # Steering
            if 5 <= abs(position - WIDTH / 2):
                leftSpeed = (position / WIDTH) * 1.5
                rightSpeed = (1 - position / WIDTH) * 1.5
            elif 2.5 < abs(position - WIDTH / 2) < 5:
                leftSpeed = (position / WIDTH) * 0.9
                rightSpeed = (1 - position / WIDTH) * 0.9
            # if the line is lost in the camera view, try adjusting the parameters here
            else:
                leftSpeed = 1
                rightSpeed = 1
        else:
            pass
        return leftSpeed, rightSpeed
Example #11
# env.environment.set_goal_position(goal_info)
'''.................................'''

# head + tail name pipe
read_name_list = [(i + "%s.pipe" % (channel_down + 1)) for i in read_name_down]
write_name_list = [(i + "%s.pipe" % (channel_down + 1))
                   for i in write_name_down]
all_path = read_name_list + write_name_list
# print(all_path)
make_pipe(all_path)

obs_pipe_down, touch_pipe_down, reward_pipe_down, over_pipe_down, terminal_pipe_down = open_write_pipe(
    write_name_list)
action_pipe_down, reset_pipe_down = open_read_pipe(read_name_list)

CAM_A = Camera("CAM_A")
CAM_A.enable(32)
CAM_A.recognitionEnable(32)
'''
initial_obs, initial_state = initial_step()
write_to_pipe([obs_pipe, touch_pipe], [initial_obs, initial_state])
print(np.array(initial_obs).shape, initial_state)
'''
initial_state = initial_step()
print("init_state: {}".format(initial_state))
write_to_pipe(touch_pipe, initial_state)


class TaskType(Enum):
    UP = 'up_dopamine'
    DOWN = 'down_dopamine'


def respond(result, data=None):
    cmd = {}
    cmd["result"] = result
    cmd["data"] = data
    send_msg(pickle.dumps(cmd))


def continous_timestep():
    while robot.step(timestep) != -1:
        pass


robot = Robot()

cameraRGB = Camera("cameraRGB")
cameraDepth = RangeFinder("cameraDepth")
timestep = int(robot.getBasicTimeStep())

current_task = "idle"
args = None
command_is_executing = False
print_once_flag = True
rgb_enabled = False
depth_enabled = False

server = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server.bind(('localhost', 2001))
server.listen()
print("Waiting for connection")
robot.step(1)  # webots won't print without a step
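
# A minimal sketch of accepting the client on the socket created above; the
# message framing used by send_msg() and the peer is not shown in this snippet,
# so only the accept step is illustrated here.
conn, addr = server.accept()
print("Connected by", addr)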
Example #13
from controller import Robot
from controller import Camera
TIME_STEP = 64
robot = Robot()
#Define motors
motors = []
motorNames = ['left motor', 'right motor']
for i in range(2):
    motors.append(robot.getMotor(motorNames[i]))
    motors[i].setPosition(float('inf'))
    motors[i].setVelocity(0.0)
    motors[i].setAcceleration(25)

camera = Camera('camera')
camera.enable(int(robot.getBasicTimeStep()))
SPEED = 2
while robot.step(TIME_STEP) != -1:
    leftSpeed = 0.0
    rightSpeed = 0.0
    #get image and process it
    image = camera.getImage()
    leftSum = 0
    rightSum = 0
    cameraData = camera.getImage()

    for x in range(0, camera.getWidth()):
        for y in range(int(camera.getHeight() * 0.9), camera.getHeight()):
            gray = Camera.imageGetGray(cameraData, camera.getWidth(), x, y)
            if x < camera.getWidth() / 2:
                leftSum += gray
            else:
Example #14
from vehicle import Driver
from controller import Camera, Display, Keyboard
import cv2
import numpy as np
from numpy import array

car = Driver()
# cameraFront = Camera("cameraFront")
cameraTop = Camera("cameraTop")
display = Display("displayTop")
display.attachCamera(cameraTop)
keyboard = Keyboard()

# cameraFront.enable(32)
cameraTop.enable(32)
keyboard.enable(32)

while car.step() != -1:
    display.setColor(0x000000)
    display.setAlpha(0.0)
    display.fillRectangle(0, 0, display.getWidth(), display.getHeight())

    img = cameraTop.getImage()

    image = np.frombuffer(img, np.uint8).reshape(
        (cameraTop.getHeight(), cameraTop.getWidth(), 4))
    # cv2.imwrite("img.png", image)
    gray = cv2.cvtColor(np.float32(image), cv2.COLOR_BGRA2GRAY)

    #--- rotate the camera image by 90 degrees
    #gray270 = np.rot90(gray, 3)