Code Example #1
File: my_controller.py  Project: ex7763/step
    robot.getMotor('leg5_servo2'),
]
#  ds = robot.getDistanceSensor('dsname')
#  ds.enable(timestep)

def move_forward():
    motor_lst[1 + 0*3].setPosition(math.pi * -1 / 8)
    motor_lst[1 + 0*3].setVelocity(1.0)
    
def rotate(angle):
    for i in range(6):
        motor_lst[0 + i*3].setPosition(angle)
        motor_lst[0 + i*3].setVelocity(1.0)

camera = Camera("camera_d435i")
camera.enable(15)
print(camera.getSamplingPeriod())
camera.saveImage("~/test.png", 100)

gyro = Gyro("gyro")
gyro.enable(60)

inertial_unit = InertialUnit("inertial_unit")
inertial_unit.enable(60)

# Main loop:
# - perform simulation steps until Webots is stopping the controller
def default_low_pos():
    for i in range(6):
        motor_lst[0 + i*3].setPosition(0)
        motor_lst[0 + i*3].setVelocity(1.0)
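# A minimal main-loop sketch (not part of the original snippet), assuming
# `robot` is the Robot instance created in the truncated part of this file:
# step the simulation and read back the devices enabled above.
timestep = int(robot.getBasicTimeStep())
default_low_pos()
while robot.step(timestep) != -1:
    image = camera.getImage()            # raw BGRA bytes (None until the first sample)
    wx, wy, wz = gyro.getValues()        # angular velocity in rad/s
    roll, pitch, yaw = inertial_unit.getRollPitchYaw()
    move_forward()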
Code Example #2
# imports implied by the calls below
from vehicle import Driver
from controller import Camera, Lidar
import matplotlib.pyplot as plt
import cv2

# create the Robot instance.
driver = Driver()

# get the time step of the current world.
timestep = int(driver.getBasicTimeStep())

Max_hizi = 80

ileri_hizi = 20
fren = 0
sayici = 0
plot = 10

camera = driver.getCamera("camera")
Camera.enable(camera, timestep)

lms291 = driver.getLidar("Sick LMS 291")
Lidar.enable(lms291, timestep)

lms291_yatay = Lidar.getHorizontalResolution(lms291)

fig = plt.figure(figsize=(3, 3))

# Main loop:
# - perform simulation steps until Webots is stopping the controller
while driver.step() != -1:

    Camera.getImage(camera)
    Camera.saveImage(camera, "camera.png", 1)
    frame = cv2.imread("camera.png")
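    # An alternative sketch (not from the original project), assuming numpy is
    # imported as np: decode the raw getImage() buffer directly instead of the
    # saveImage()/cv2.imread() round trip above (Webots camera buffers are BGRA).
    raw = Camera.getImage(camera)
    frame_bgra = np.frombuffer(raw, np.uint8).reshape(
        (Camera.getHeight(camera), Camera.getWidth(camera), 4))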
Code Example #3
File: my_controller2.py  Project: robotclasses/hw1
positionSensors = []

# Create the Robot instance.
robot = Robot()
robot.getSupervisor()

basicTimeStep = int(robot.getBasicTimeStep())
# print(robot.getDevice("camera"))
camera1 = robot.getCamera("Camera")
print(camera1)
# camera= Camera(camera1)
camera = Camera('Camera')
# print(robot.getCamera('Camera'))
# camera.wb_camera_enable()
mTimeStep = basicTimeStep
print(camera.enable(int(mTimeStep)))
print(camera.getSamplingPeriod())
print(camera.getWidth())
print(camera.getHeight())
image = camera.getImage()
# print(image)
if image is None:
    print("none")
# print(image.size())
# cameradata = cv2.VideoCapture('Camera')
camera.saveImage('/home/luyi/webots.png', 100)
# print(len(cap))
# cv2.imshow("cap",cap)
# print(image[2][3][0])
# for x in range(0,camera.getWidth()):
# for y in range(0,camera.getHeight()):
Code Example #4
from controller import Robot, Motor, DistanceSensor
from ikpy.chain import Chain
import numpy as np
import time
from controller import Camera, Device, CameraRecognitionObject

robot = Robot()

camera = Camera("camera")

camera.enable(20)

#firstObject = Camera.getRecognitionObjects()[0]
#id = firstObject.get_id()
#position = firstObject.get_position()
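# A minimal sketch (not from the original project) of how the commented-out
# recognition calls above are typically used; it assumes the Camera node in
# the world has a Recognition node attached.
camera.recognitionEnable(20)
timestep = int(robot.getBasicTimeStep())
while robot.step(timestep) != -1:
    objects = camera.getRecognitionObjects()
    if objects:
        firstObject = objects[0]
        print(firstObject.get_id(), firstObject.get_position())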
Code Example #5
robot = Robot()
global camPub
global rangePub
camPub = rospy.Publisher('/camera/image', Image, queue_size=20)
rangePub = rospy.Publisher('/range_finder/image', Image, queue_size=20)
# get the time step of the current world.
timestep = int(robot.getBasicTimeStep())
SAMPLE_TIME = 100
camera = robot.getDevice('camera')
depth = robot.getDevice('range-finder')
global rscam
global depthcam
rscam = Camera('camera')
depthcam = RangeFinder('range-finder')
depthcam.enable(SAMPLE_TIME)
rscam.enable(SAMPLE_TIME)
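# A hypothetical sketch of the camera_CB callback (its real body is not shown
# in this snippet): when a True message arrives on 'publish_images', publish
# the current Webots frame as a sensor_msgs/Image (Webots buffers are BGRA).
def camera_CB(data):
    if not data.data:
        return
    frame = Image()
    frame.header.stamp = rospy.Time.now()
    frame.height = rscam.getHeight()
    frame.width = rscam.getWidth()
    frame.encoding = 'bgra8'
    frame.step = 4 * rscam.getWidth()
    frame.data = rscam.getImage()
    camPub.publish(frame)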
rospy.Subscriber('publish_images', Bool, camera_CB)

clockPublisher = rospy.Publisher('clock', Clock, queue_size=1)
if not rospy.get_param('use_sim_time', False):
    rospy.logwarn('use_sim_time is not set!')

# Main loop:
# - perform simulation steps until Webots is stopping the controller
while robot.step(timestep) != -1 and not rospy.is_shutdown():
    msg = Clock()
    time = robot.getTime()
    msg.clock.secs = int(time)
    # round prevents precision issues that can cause problems with ROS timers
    msg.clock.nsecs = round(1000 * (time - msg.clock.secs)) * 1.0e+6
    clockPublisher.publish(msg)
Code Example #6
from vehicle import Driver
from controller import Camera, Display, Keyboard
import cv2
import numpy as np
from numpy import array

car = Driver()
# cameraFront = Camera("cameraFront")
cameraTop = Camera("cameraTop")
display = Display("displayTop")
display.attachCamera(cameraTop)
keyboard = Keyboard()

# cameraFront.enable(32)
cameraTop.enable(32)
keyboard.enable(32)

while car.step() != -1:
    display.setColor(0x000000)
    display.setAlpha(0.0)
    display.fillRectangle(0, 0, display.getWidth(), display.getHeight())

    img = cameraTop.getImage()

    image = np.frombuffer(img, np.uint8).reshape(
        (cameraTop.getHeight(), cameraTop.getWidth(), 4))
    # cv2.imwrite("img.png", image)
    gray = cv2.cvtColor(np.float32(image), cv2.COLOR_BGRA2GRAY)  # Webots camera buffers are BGRA

    # --- rotates the camera image by 90 degrees
    #gray270 = np.rot90(gray, 3)
Code Example #7
File: kuka_test.py  Project: synth-me/JASS_robot
roda_2 = robot.getMotor("wheel3")
roda_3 = robot.getMotor("wheel4")

rodas_l = [roda_0, roda_1, roda_2, roda_3]

finger_0 = robot.getMotor("finger1")
finger_1 = robot.getMotor("finger2")

joint_list = [joint_2, finger_0, finger_1]
finger_list = [finger_0, finger_1]

wj = [rodas_l, joint_list]

c = Camera("camera")

c.enable(samplingPeriod=2000)
# importing the tesseract executable again
pytesseract.pytesseract.tesseract_cmd = t_path


def robot_initial():
    while robot.step(timestep) != -1:
        c.enable(samplingPeriod=100)

        # here the robot goes forward to the tags
        for r in rodas_l:
            r.setPosition(n0)

        base_braço.setPosition(1.5)
        delay(8500)
        # here we start robot's whole process
Code Example #8
'''.................................'''

# head + tail name pipe
read_name_list = [(i + "%s.pipe" % (channel_down + 1)) for i in read_name_down]
write_name_list = [(i + "%s.pipe" % (channel_down + 1))
                   for i in write_name_down]
all_path = read_name_list + write_name_list
# print(all_path)
make_pipe(all_path)

obs_pipe_down, touch_pipe_down, reward_pipe_down, over_pipe_down, terminal_pipe_down = open_write_pipe(
    write_name_list)
action_pipe_down, reset_pipe_down = open_read_pipe(read_name_list)

CAM_A = Camera("CAM_A")
CAM_A.enable(32)
CAM_A.recognitionEnable(32)
CAM_B = Camera("CAM_B")
CAM_B.enable(32)
CAM_B.recognitionEnable(32)
CAM_C = Camera("CAM_C")
CAM_C.enable(32)
CAM_C.recognitionEnable(32)
CAM_D = Camera("CAM_D")
CAM_D.enable(32)
CAM_D.recognitionEnable(32)
'''
ik code------------------------------------------------------------------------------------------------------
'''
#position Sensor
positionSensorList = []
Code Example #9
# how the sensor perceives distance
min_uzaklık = 1.0

# create the Robot instance.
robot = Robot()

# get the time step of the current world.
timestep = int(robot.getBasicTimeStep())

# gets the Camera and the RangeFinder
kinectColor = robot.getCamera("kinect color")
kinectRange = robot.getRangeFinder("kinect range")

# camera and range finder enabled
Camera.enable(kinectColor, timestep)
RangeFinder.enable(kinectRange, timestep)

# gets the motor tags
# gets the motors
solMotor = robot.getMotor("left wheel")
sağMotor = robot.getMotor("right wheel")

# makes the motors move
solMotor.setPosition(float("inf"))
sağMotor.setPosition(float("inf"))

ps = []
psNames = ["so0", "so1", "so2", "so3", "so4", "so5", "so6", "so7"]

for i in range(8):
Code Example #10
        if print_once_flag:
            print("Waiting for command")
            robot.step(1)
            print_once_flag = False
        msg = recv_msg()
        if msg is not None:
            cmd = pickle.loads(msg)
            print(cmd)
            current_task = cmd["name"]
            args = cmd["args"]
            print("Executing: " + current_task + " " + str(args))
            print_once_flag = True

    if current_task == "get_image":
        if not rgb_enabled:
            cameraRGB.enable(timestep)
            rgb_enabled = True
        else:
            np_img = np.array(cameraRGB.getImageArray(), dtype=np.uint8)
            respond("done", np_img)
            current_task = "idle"
            cameraRGB.disable()
            rgb_enabled = False

    if current_task == "get_depth":
        if not depth_enabled:
            cameraDepth.enable(timestep)
            depth_enabled = True
        else:
            np_dep = np.array(cameraDepth.getRangeImageArray())
            respond("done", np_dep)
Code Example #11
from controller import Robot
from controller import Camera
TIME_STEP = 64
robot = Robot()
#Define motors
motors = []
motorNames = ['left motor', 'right motor']
for i in range(2):
    motors.append(robot.getMotor(motorNames[i]))
    motors[i].setPosition(float('inf'))
    motors[i].setVelocity(0.0)
    motors[i].setAcceleration(25)

camera = Camera('camera')
camera.enable(int(robot.getBasicTimeStep()))
SPEED = 2
while robot.step(TIME_STEP) != -1:
    leftSpeed = 0.0
    rightSpeed = 0.0
    #get image and process it
    image = camera.getImage()
    leftSum = 0
    rightSum = 0
    cameraData = camera.getImage()

    for x in range(0, camera.getWidth()):
        for y in range(int(camera.getHeight() * 0.9), camera.getHeight()):
            gray = Camera.imageGetGray(cameraData, camera.getWidth(), x, y)
            if x < camera.getWidth() / 2:
                leftSum += gray
            else:
Code Example #12
def clamp(x, low, hi):
    return min(hi, max(low, x))


init_supervisor()
# create the Robot instance.
robot = supervisor

print("Goal Location:", goal_location)

# get the time step of the current world.
timestep = int(robot.getBasicTimeStep())

# initialize camera
camera = Camera('camera')
camera.enable(1)
# position camera
camera_pitch_motor = robot.getMotor('camera pitch')
camera_pitch_motor.setPosition(math.pi / 2)
# Start propellers
initializeMotors(robot)
prop_takeoff_speed = 68.5
motorsSpeed(robot, prop_takeoff_speed, prop_takeoff_speed, prop_takeoff_speed,
            prop_takeoff_speed)
# Initialize Gyro
myGyro = robot.getGyro('gyro')
myGyro.enable(timestep)
# Initialize InertialUnit
inertial_unit = InertialUnit('inertial unit')
inertial_unit.enable(timestep)
# Initialize GPS
Code Example #13
class SupervisorController:
    def __init__(self,
                 timesteps=32,
                 gamma=0.99,
                 epsilon=1.0,
                 epsilon_min=0.01,
                 epsilon_log_decay=0.99,
                 alpha=0.01):
        self.supervisor = Supervisor()
        self.robot_node = self.supervisor.getFromDef("MY_BOT")
        if self.robot_node is None:
            sys.stderr.write(
                "No DEF MY_ROBOT node found in the current world file\n")
            sys.exit(1)
        self.trans_field = self.robot_node.getField("translation")
        self.rot_field = self.robot_node.getField("rotation")
        self.timestep = timesteps

        self.camera = Camera('camera')
        self.camera.enable(self.timestep)
        self.init_image = self.get_image()

        self.timestep = timesteps
        self.receiver = Receiver('receiver')
        self.receiver.enable(self.timestep)
        self.emitter = Emitter('emitter')

        self.memory = deque(maxlen=50000)
        self.batch_size = 128
        self.alpha = alpha
        self.gamma = gamma
        self.epsion_init = epsilon
        self.epsilon_min = epsilon_min
        self.epsilon_decay = epsilon_log_decay

        self.pre_state = self.init_image
        self.pre_action = -1

        self.pre_go_straight = False
        self.reward = 0
        self.step = 0
        self.max_step = 200
        self.file = None

        # interactive
        self.feedbackProbability = 0
        self.feedbackAccuracy = 1
        self.PPR = False
        self.feedbackTotal = 0
        self.feedbackAmount = 0

        self.init_model()
        self.init_parametter()

    def init_model(self):
        self.main_network = self.build_network()
        self.target_network = self.build_network()
        self.agent_network = self.build_network()
        self.generalise_model = self.init_gereral_model()
        self.pca_model = self.init_pca_model()

    def init_parametter(self):
        self.epsilon = self.epsion_init
        self.episode = 0
        self.policy_reuse = PPR()

    def init_gereral_model(self):
        n_clusters = 2
        return KMeans(n_clusters=n_clusters, n_init=10)

    def init_pca_model(self):
        n_component = 100
        return PCA(n_components=n_component, random_state=22)

    def get_image(self):
        image = self.camera.getImage()
        if image is None:
            empty_image = np.zeros((64, 64, 3))
            return Image.fromarray(empty_image.astype(np.uint8))
        else:
            return self.toPIL(image)

    def toPIL(self, bytes_data):
        imgPIL = Image.frombytes('RGBA', (64, 64), bytes_data)
        imgPIL = imgPIL.convert('RGB')
        return imgPIL

    def image_process(self, PIL):
        array = np.array(PIL)
        array = array / 255
        return np.reshape(array, list((1, ) + array.shape))

    def save_image(self, PIL, ep, step):
        PIL.save(resultsFolder + 'images/' + str(ep) + '_' + str(step) +
                 '.png')

    def update_target(self):
        self.target_network.set_weights(self.main_network.get_weights())

    def observation_space(self):
        return self.observation_space

    def get_epsilon(self, t):
        return max(
            self.epsilon_min,
            min(self.epsilon, 1.0 - math.log10((t + 1) * self.epsilon_decay)))

    def build_network(self):
        model = Sequential()
        model.add(Input(shape=(64, 64, 3)))
        model.add(Conv2D(4, kernel_size=8, activation='linear',
                         padding='same'))
        model.add(MaxPooling2D((2, 2), padding='same'))
        model.add(Conv2D(8, kernel_size=4, activation='linear',
                         padding='same'))
        model.add(MaxPooling2D((2, 2), padding='same'))
        model.add(
            Conv2D(16, kernel_size=2, activation='linear', padding='same'))
        model.add(MaxPooling2D((2, 2), padding='same'))
        model.add(Flatten())
        model.add(Dense(256, activation='linear'))
        model.add(Dense(len(self.action_space()), activation='softmax'))
        opt = Nadam(learning_rate=self.alpha)
        model.compile(loss='mse', optimizer=opt)
        return model

    def save_reward(self, file, rewards, totals, feedbacks):
        pairs = {'Reward': rewards, 'Total': totals, 'Feedback': feedbacks}
        data_df = pd.DataFrame.from_dict(pairs)
        data_df.to_csv(file)

    def save_model(self, file):
        self.main_network.save_weights(file)
        self.save_generalise_model(file)

    def save_generalise_model(self, filename):
        obs = [s[5] for s in self.memory]
        with open(filename + 'gel', "wb") as f:
            pickle.dump(self.generalise_model, f)
        with open(filename + 'pca', "wb") as f:
            pickle.dump(self.pca_model, f)
        with open(filename + 'state', "wb") as f_:
            pickle.dump(obs, f_)

    def load_generalise_model(self, filename):
        with open(filename + 'gel', "rb") as f:
            print(filename + 'gel')
            self.generalise_model = pickle.load(f)
        with open(filename + 'pca', "rb") as f:
            self.pca_model = pickle.load(f)

    def load_model(self, file):
        self.agent_network.load_weights(file + '.model')
        self.load_generalise_model(file + '.model')
        self.update_target()

    def finalise(self, rewards, totals, feedbacks, ppr):
        file = self.file + '_' + str(self.feedbackProbability) + '_' + str(
            self.feedbackAccuracy) + str(ppr)
        self.save_reward(file + '.csv', rewards, totals, feedbacks)
        self.save_model(file + '.model')

    def get_group(self, state):
        # nx, ny, nz = state[0].shape
        # state = state.reshape(nx * ny * nz)
        # state = [state]
        # new_state = self.pca_model.transform(state)

        nx, ny, nz = state[0].shape
        image_grayscale = state[0].mean(axis=2).astype(np.float32)
        image_grayscale = image_grayscale.reshape(nx * ny)
        image_grayscale = [image_grayscale]
        return self.generalise_model.predict(image_grayscale)[0]

    def memorize(self, state, action, reward, next_state, done, obs):
        self.memory.append((state, action, reward, next_state, done, obs))

    def updatePolicy(self, batchSize=0):
        if batchSize == 0:
            batchSize = self.batch_size
        if len(self.memory) < batchSize:
            self.trainNetwork(len(self.memory))
            return  # do nothing
        self.trainNetwork(batchSize)
        return

    def trainNetwork(self, batch_size):

        # sample a mini batch of transition from the replay buffer
        minibatch = random.sample(self.memory, batch_size)
        states = []
        targets = []

        for state, action, reward, next_state, done, obs in minibatch:
            state_processed = self.image_process(state)
            next_state_processed = self.image_process(next_state)

            if not done:
                target = self.target_network.predict(next_state_processed)
                target_Q = (reward + self.gamma * np.max(target[0]))
            else:
                target_Q = reward
            # compute the Q value using the main network
            Q_values = self.main_network.predict(state_processed)
            Q_values[0][action] = target_Q
            states.append(state_processed[0])
            targets.append(Q_values[0])
        # train the main network
        states = np.array(states)
        targets = np.array(targets)
        self.main_network.fit(states, targets, epochs=1, verbose=0)

    def normal_action(self, state, epsilon=0.1):
        # exploration
        if np.random.random() <= epsilon:
            action = self.random_action()
            # PPR:
            if self.PPR:
                group = self.get_group(state)
                redoAction, rate = self.policy_reuse.get(group)
                # print(group, rate)
                if (np.random.rand() < rate):
                    action = redoAction
            # end PPR:

        # exploitation
        else:
            action = np.argmax(self.main_network.predict(state))
        return action

    def action_space(self):
        """
        0: left
        1: right
        2: straight
        """
        return [0, 1, 2]

    def random_action(self):
        # if np.random.rand() < 0.5:
        #     return 2
        # else:
        #     return random.choice([0, 1])
        return random.choice(self.action_space())

    def propose_action(self, obs):
        return

    def has_obstacle(self, leftValue, rightValue):
        return leftValue > 500 or rightValue > 500

    def back_to_begin(self):
        INITIAL = [0, 0, 0]
        self.trans_field.setSFVec3f(INITIAL)
        ROT_INITIAL = [0, 1, 0, 3.2]
        self.rot_field.setSFRotation(ROT_INITIAL)

    def reset(self):
        self.pre_state = self.init_image
        self.pre_action = -1

        self.pre_go_straight = False
        self.reward = 0
        self.step = 0
        self.finish = False

        self.feedbackTotal = 0
        self.feedbackAmount = 0

        self.back_to_begin()
        self.send_to_robot('reset', None)

    def propose_new_action(self, obs):
        left, right = obs
        obstacle_flag = self.has_obstacle(left, right)

        pre_state_processed = self.image_process(self.pre_state)
        if not obstacle_flag:
            action = 2
            self.pre_action = action
            self.pre_go_straight = True
        else:

            # propose new action ------------------
            if self.PPR:
                self.policy_reuse.step()
            if np.random.rand() < self.feedbackProbability:
                # get advice
                trueAction = np.argmax(
                    self.agent_network.predict(pre_state_processed))

                # PPR:
                if self.PPR:
                    group = self.get_group(pre_state_processed)
                    self.policy_reuse.add(group, trueAction)
                # end PPR:

                if np.random.rand() < self.feedbackAccuracy:
                    action = trueAction
                else:
                    while True:
                        action = self.random_action()
                        if action != trueAction:
                            break
                self.feedbackAmount += 1
            else:
                action = self.normal_action(pre_state_processed, self.epsilon)
            self.pre_go_straight = False
            self.feedbackTotal += 1

            self.pre_action = action
        return action

    def execute(self, obs, reward, done, info):

        state = self.get_image()
        if self.pre_action != -1:
            self.reward += reward
            if self.step == self.max_step or done:
                if done:
                    self.save_image(state, self.episode, self.step)
                    self.save_image(self.pre_state, self.episode,
                                    self.step - 1)

                self.memorize(self.pre_state, self.pre_action, reward, state,
                              done, obs)
                self.updatePolicy(self.step)
                self.update_target()

                if self.epsilon > self.epsilon_min:
                    self.epsilon *= self.epsilon_decay
                self.episode += 1
                self.finish = True
                return

            if info:
                self.back_to_begin()

            if not self.pre_go_straight:
                self.memorize(self.pre_state, self.pre_action, reward, state,
                              done, obs)
                self.pre_state = state

        return

    def receive_handle(self):
        send_message, send_data = None, None
        if self.receiver.getQueueLength() > 0:
            data = self.receiver.getData()
            message, d = pickle.loads(data)
            if message == 'step_done':
                obs, r, d, i, s = d
                # print(s, self.step - 1, self.pre_action, r)  # check synchronize

                self.execute(obs, r, d, i)
                if not self.finish:
                    action = self.propose_new_action(obs)
                    self.send_to_robot('step', action)
                    self.step += 1
            if message == 'reset_done':
                obs = d
                self.execute(obs, 0, False, False)
                action = self.propose_new_action(obs)
                self.send_to_robot('step', action)
            if message == 'obstacle':
                self.back_to_begin()

            self.receiver.nextPacket()
        return

    def send_to_robot(self, message, data):
        data = message, data, self.step
        dataSerialized = pickle.dumps(data)
        self.emitter.send(dataSerialized)

    def start(self,
              max_step,
              episodes,
              file,
              feedbackP=0,
              feedbackA=1,
              PPR=False):
        self.file = file
        self.max_step = max_step
        self.feedbackProbability = feedbackP
        self.feedbackAccuracy = feedbackA
        self.PPR = PPR
        rewards = []
        feedbackTotal = []
        feedbackAmount = []
        self.init_parametter()
        for i in range(episodes):
            self.reset()
            self.episode = i
            while self.supervisor.step(
                    self.timestep) != -1 and not self.finish:
                self.receive_handle()
            print(i, self.reward, self.feedbackTotal, self.feedbackAmount)
            rewards.append(self.reward)
            feedbackTotal.append(self.feedbackTotal)
            feedbackAmount.append(self.feedbackAmount)
        self.finalise(rewards, feedbackTotal, feedbackAmount, PPR)
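# A minimal usage sketch (not part of the original snippet); the result-file
# prefix, episode count, and feedback settings below are illustrative placeholders.
controller = SupervisorController(timesteps=32)
controller.start(max_step=200, episodes=100, file='results/run',
                 feedbackP=0.1, feedbackA=0.9, PPR=False)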
Code Example #14
"""data_collector controller."""
import random
import time
import csv
import numpy as np
from controller import Robot, Camera, PositionSensor

# create the Robot instance.
robot = Robot()

# get the time step of the current world.
timestep = int(robot.getBasicTimeStep())

# get and enable Camera
cameraTop = Camera("CameraTop")
cameraTop.enable(timestep)

print("Width: ", cameraTop.getWidth())
print("Height: ", cameraTop.getHeight())
#cameraTop.saveImage("cameraImg.png", 50)

cameraBottom = Camera("CameraBottom")
cameraBottom.enable(timestep)

#Move head in right position
#HeadYaw = robot.getMotor("HeadYaw")
#HeadPitch = robot.getMotor("HeadPitch")

#HeadYaw.setPosition(0.5)
#HeadYaw.setVelocity(1.0)
#HeadPitch.setPosition(0.2)
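# A minimal data-collection loop sketch (not part of the original snippet):
# step the simulation and periodically save frames from both cameras; the
# interval and file names are illustrative placeholders.
step_count = 0
while robot.step(timestep) != -1:
    step_count += 1
    if step_count % 50 == 0:
        cameraTop.saveImage("top_%d.png" % step_count, 100)
        cameraBottom.saveImage("bottom_%d.png" % step_count, 100)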
Code Example #15
import matplotlib.pyplot as plt
import numpy as np
import math

TIME_STEP = 64  # ms
MAX_SPEED = 100  # km/h

driver = Driver()

speedFoward = 0.1 * MAX_SPEED  # km/h
speedBrake = 0  # km/h
cont = 0
plot = 10

cameraRGB = driver.getCamera('camera')
Camera.enable(cameraRGB, TIME_STEP)

lms291 = driver.getLidar('Sick LMS 291')
print(lms291)
Lidar.enable(lms291, TIME_STEP)
lms291_width = Lidar.getHorizontalResolution(lms291)
print(lms291_width)

fig = plt.figure(figsize=(3, 3))

while driver.step() != -1:

    if cont < 1000:
        driver.setDippedBeams(True)  # headlights on
        driver.setIndicator(0)  # 0 -> OFF  1 -> Right   2 -> Left
        driver.setCruisingSpeed(speedFoward)  # throttle (speed)
Code Example #16
File: all1.py  Project: robotclasses/hw1
# Create the Robot instance.
robot = Robot()
robot.getSupervisor()


basicTimeStep = int(robot.getBasicTimeStep())
# print(robot.getDevice("camera"))
camera1 = robot.getCamera("Camera")
print(camera1)
# camera= Camera(camera1)
camera = Camera('Camera')
# print(robot.getCamera('Camera'))
# camera.wb_camera_enable()
mTimeStep = basicTimeStep
camera.enable(int(mTimeStep))
camera.getSamplingPeriod()
# width=camera.getWidth()
# height=camera.getHeight()
firstimage = camera.getImage()
ori_width = int(4 * 160)  # original image is 640x480
ori_height = int(3 * 160)
r_width = int(4 * 20)  # downscaled to 80x60 for processing to speed it up; modify with caution!
r_height = int(3 * 20)
color_range = {'yellow_door': [(10, 43, 46), (34, 255, 255)],
               'red_floor1': [(0, 43, 46), (10, 255, 255)],
               'red_floor2': [(156, 43, 46), (180, 255, 255)],
               'green_bridge': [(35, 43, 20), (100, 255, 255)],
               'yellow_hole': [(10, 70, 46), (34, 255, 255)],
               'black_hole': [(0, 0, 0), (180, 255, 80)],
               'black_gap': [(0, 0, 0), (180, 255, 100)],
Code Example #17
File: doguiBot.py  Project: KHRSOP/DoguiBot
"""seguidor_linha controller"""
from controller import Robot
from controller import Camera
import cv2
import numpy as np
import math
import time

robot = Robot()
timestep = int(robot.getBasicTimeStep())

#Camera

camera = Camera("camera")
camera.enable(timestep)
MAX_MOTORS = 16
L1 = 9  # UPPER_LEG_LENGTH [cm]
L2 = 8.5  # LOWER_LEG_LENGTH [cm]
M_PI = 3.14159265358979323846
gait_type = 5
i = 0
key = -1
backward = True
slf = 1
slf_min = 0
slf_max = 1
stride_length_factor = [slf, slf, slf, slf]
freq_min = 0.4
freq_max = 2
freq = 1.5
freq_offset = 0.2
Code Example #18
class RobotController:
    def __init__(self, max_steps=2, init_position=(0, 0, 0), final_position=(-0.3, 0, 0.3), max_speed=3,
                 ):
        self.robot = Robot()

        self.timestep = int(self.robot.getBasicTimeStep())
        self.max_steps = max_steps
        self.max_speed = max_speed

        self.setupDevice()

        self.init_position = init_position
        self.current_position = init_position
        self.final_position = final_position

        self.done = False

        # Interactive
        self.feedbackAmount = 0

        # self.policy_reuse = PPR()

    def setupDevice(self):
        self.leftMotor = self.robot.getDevice('left wheel motor')
        self.rightMotor = self.robot.getDevice('right wheel motor')
        self.leftMotor.setPosition(float('inf'))
        self.rightMotor.setPosition(float('inf'))

        self.rightDistanceSensor = self.robot.getDevice('ds1')
        self.leftDistanceSensor = self.robot.getDevice('ds0')
        self.rightDistanceSensor.enable(self.timestep)
        self.leftDistanceSensor.enable(self.timestep)

        self.gps = self.robot.getDevice('gps')
        self.touchSensor1 = self.robot.getDevice('touch_sensor1')
        self.touchSensor2 = self.robot.getDevice('touch_sensor2')
        self.touchSensor3 = self.robot.getDevice('touch_sensor3')
        self.touchSensor4 = self.robot.getDevice('touch_sensor4')
        self.touchSensor5 = self.robot.getDevice('touch_sensor5')
        self.gps.enable(self.timestep)
        self.touchSensor1.enable(self.timestep)
        self.touchSensor2.enable(self.timestep)
        self.touchSensor3.enable(self.timestep)
        self.touchSensor4.enable(self.timestep)
        self.touchSensor5.enable(self.timestep)

        self.camera = Camera('camera')
        self.camera.enable(self.timestep)

        self.leftMotor.setVelocity(0)
        self.rightMotor.setVelocity(0)

        self.init_leftValue = self.leftDistanceSensor.getValue()
        self.init_rightValue = self.rightDistanceSensor.getValue()

        self.receiver = Receiver('receiver')
        self.emitter = Emitter('emitter')
        self.receiver.enable(self.timestep)

    def is_collised(self):
        if (self.touchSensor1.getValue() + self.touchSensor2.getValue() +
                self.touchSensor3.getValue() + self.touchSensor4.getValue() +
                self.touchSensor5.getValue() > 0):
            print(1, self.touchSensor1.getValue())
            print(2, self.touchSensor2.getValue())
            print(3, self.touchSensor3.getValue())
            print(4, self.touchSensor4.getValue())
            print(5, self.touchSensor5.getValue())
            return True
        gpsValue = self.gps.getValues()
        self.current_position = gpsValue
        if (self.current_position[0] < - 0.5 or self.current_position[0] > 0.5 or
                self.current_position[2] < - 0.5 or self.current_position[2] > 0.5):
            return True

        return False

    def step(self, a):
        if not self.done:
            self.robot.step(self.timestep)
            if not self.is_collised():
                leftValue = self.leftDistanceSensor.getValue()
                rightValue = self.rightDistanceSensor.getValue()
                reward = -0.1
                leftSpeed, rightSpeed = 0, 0
                if a == 0:
                    leftSpeed, rightSpeed = self.turnLeft(leftValue, rightValue)
                elif a == 1:
                    leftSpeed, rightSpeed = self.turnRight(leftValue, rightValue)
                elif a == 2:
                    leftSpeed, rightSpeed = self.goStraight()
                    reward = 0
                # elif a == 3:
                #     leftSpeed, rightSpeed = self.goSlow()

                self.leftMotor.setVelocity(leftSpeed)
                self.rightMotor.setVelocity(rightSpeed)

                # set observation .............

                observations = leftValue, rightValue

                # set reward .............

                # set done .........
                r = self.set_done()
                return observations, reward + r, self.done, False

            else:
                observations = self.reset()
                reward = -100
                return observations, reward, False, True

        return None, None, self.done, False

    def set_done(self):
        gpsValue = self.gps.getValues()
        self.current_position = gpsValue
        if abs(self.current_position[0] - self.final_position[0]) <= 0.08 and \
                abs(self.current_position[2] - self.final_position[2]) <= 0.08:
            self.done = True
            return 1000
        return 0

    def random_action(self):
        return random.choice(self.action_space())

    def goStraight(self):
        return self.max_speed, self.max_speed

    def goSlow(self):
        return self.max_speed / 4, self.max_speed / 4

    def turnLeft(self, leftDistance, rightDistance):
        return -(leftDistance / 100), (rightDistance / 100) + 0.5

    def turnRight(self, leftDistance, rightDistance):
        return (leftDistance / 100) + 0.5, -(rightDistance / 100)

    def reset(self):
        self.done = False
        return self.init_leftValue, self.init_rightValue

    def send_to_super(self, message, data):
        data = message, data
        dataSerialized = pickle.dumps(data)
        self.emitter.send(dataSerialized)

    def receive_handle(self):
        if self.receiver.getQueueLength() > 0:
            data = self.receiver.getData()
            message, action, step = pickle.loads(data)
            self.receiver.nextPacket()
            if message == 'step':
                obs, r, d, i = self.step(action)
                data = obs, r, d, i, step
                self.send_to_super('step_done', data)
            if message == 'reset':
                obs = self.reset()
                self.send_to_super('reset_done', obs)

        return -1

    def start(self):
        while self.robot.step(self.timestep) != -1:
            self.receive_handle()
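# A minimal usage sketch (not in the original snippet): run the robot-side
# controller so it services the 'step'/'reset' commands sent by the supervisor.
if __name__ == "__main__":
    controller = RobotController()
    controller.start()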
Code Example #19
'''.................................'''

# head + tail name pipe
read_name_list = [(i + "%s.pipe" % (channel_down + 1)) for i in read_name_down]
write_name_list = [(i + "%s.pipe" % (channel_down + 1))
                   for i in write_name_down]
all_path = read_name_list + write_name_list
# print(all_path)
make_pipe(all_path)

obs_pipe_down, touch_pipe_down, reward_pipe_down, over_pipe_down, terminal_pipe_down = open_write_pipe(
    write_name_list)
action_pipe_down, reset_pipe_down = open_read_pipe(read_name_list)

CAM_A = Camera("CAM_A")
CAM_A.enable(32)
CAM_A.recognitionEnable(32)
'''
initial_obs, initial_state = initial_step()
write_to_pipe([obs_pipe, touch_pipe], [initial_obs, initial_state])
print(np.array(initial_obs).shape, initial_state)
'''
initial_state = initial_step()
print("init_state: {}".format(initial_state))
write_to_pipe(touch_pipe, initial_state)


class TaskType(Enum):
    UP = 'up_dopamine'
    DOWN = 'down_dopamine'
    GRAB = 'grab_ik'
Code Example #20
MAX_SPEED = 6.28

# maximal value returned by the sensors
MAX_SENSOR_VALUE = 1024
# minimal distance, in meters, for an obstacle to be considered
MIN_DISTANCE = 1.0

# create the robot instance
robot = Robot()

# initialize the kinect
kinectColor = robot.getCamera('kinect color')
kinectDepth = robot.getRangeFinder('kinect range')
print(kinectColor)
Camera.enable(kinectColor, TIME_STEP)
RangeFinder.enable(kinectDepth, TIME_STEP)

# get a handler to the motors and set target position to infinity (speed control)
leftMotorFront = robot.getMotor('front left wheel')
rightMotorFront = robot.getMotor('front right wheel')
leftMotorBack = robot.getMotor('back left wheel')
rightMotorBack = robot.getMotor('back right wheel')

leftMotorFront.setPosition(float('inf'))
rightMotorFront.setPosition(float('inf'))
leftMotorBack.setPosition(float('inf'))
rightMotorBack.setPosition(float('inf'))

# initialize devices
ps = []
Code Example #21

# This function works like the one above; it focuses on the bin.
def foundbin(k, l):
    print("bin is detected")
    robot.setSpeed(k, l)
    # same as "foundtrash" method.
    if ((k >= 190 and l >= 190) and (left7 >= 140 or right0 >= 140)
            and (red[20] >= 100 and green[20] <= 10 and blue[20] <= 10)):
        led[0].set(0)
        print(" I am next to bin")


# enable camera.
camera = Camera("camera")
camera.enable(timestep * 2)
print("Camera width = ", camera.getWidth(), "Camera height =",
      camera.getHeight())

# enable LEDs
led = [0] * 8
count = 0
for i in range(8):
    name = "led" + str(i)
    led[i] = LED(name)

robot.enableEncoders(timestep)
# enable distance and ground sensors
irLeft7 = DistanceSensor("ps7")
irLeft6 = DistanceSensor("ps6")
irLeft5 = DistanceSensor("ps5")