Example 1
def setup_devices():
    """ Assign the devices from the simulator to specific IDs """
    global robotID, left_motorID, right_motorID, visonID
    # res: result (1(OK), -1(error), 0(not called))
    # robot
    res, robotID = vrep.simxGetObjectHandle(clientID, 'robot#', WAIT)
    # motors
    res, left_motorID = vrep.simxGetObjectHandle(clientID, 'leftMotor#', WAIT)
    res, right_motorID = vrep.simxGetObjectHandle(clientID, 'rightMotor#',
                                                  WAIT)
    res, visonID = vrep.simxGetObjectHandle(clientID, "Vision_sensor#", WAIT)
    res, resolution, image = vrep.simxGetVisionSensorImage(
        clientID, visonID, 0, STREAMING)
    time.sleep(0.5)
    # start up devices

    # wheels
    vrep.simxSetJointTargetVelocity(clientID, left_motorID, 0, STREAMING)
    vrep.simxSetJointTargetVelocity(clientID, right_motorID, 0, STREAMING)
    # pose
    vrep.simxGetObjectPosition(clientID, robotID, -1, MODE_INI)
    vrep.simxGetObjectOrientation(clientID, robotID, -1, MODE_INI)
    # vision sensor
    vrep.simxGetVisionSensorImage(clientID, visonID, 1, STREAMING)
    return
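Example 1 relies on module-level names (clientID, WAIT, STREAMING, MODE_INI) that are defined elsewhere in that project. Below is a minimal sketch of how such a module might open the connection and define those aliases before calling setup_devices; the constant aliases and the port number are assumptions, not part of the original code.

import time
import vrep

# Assumed aliases for the remote-API operation modes used in setup_devices()
WAIT = vrep.simx_opmode_oneshot_wait
STREAMING = vrep.simx_opmode_streaming
MODE_INI = vrep.simx_opmode_streaming  # first call of a value that is later read from the buffer

vrep.simxFinish(-1)  # close any stale connections
clientID = vrep.simxStart('127.0.0.1', 19999, True, True, 5000, 5)  # port is scene-specific
if clientID == -1:
    raise RuntimeError('could not connect to the V-REP remote API server')
setup_devices()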
Example 2
 def initialize_vrep_cameras(self):
     #vrep setup
     vrep.simxFinish(-1)
     self.clientID = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)
     if(self.clientID == -1):
         raise RuntimeError("ClientID -1, could not connect to vrep")
     else:
          #Top camera
         #Get the handle of the top vision sensor
         res1, self.top_camera_handle = vrep.simxGetObjectHandle(
             self.clientID, self.top_camera_name, vrep.simx_opmode_oneshot_wait)
         if(res1 != 0):
             raise RuntimeError("Could not get top camera handle. res = " + str(res1))
         #Get the handle of the bottom vision sensor
         res1, self.bottom_camera_handle = vrep.simxGetObjectHandle(
             self.clientID, self.bottom_camera_name, vrep.simx_opmode_oneshot_wait)
         if(res1 != 0):
             raise RuntimeError("Could not get bottom camera handle. res = " + str(res1))
         
         #Initialize top camera stream
         _, _, _ = vrep.simxGetVisionSensorImage(
             self.clientID, self.top_camera_handle, 0, vrep.simx_opmode_streaming)
         
         #Initialize bottom camera stream
         _, _, _ = vrep.simxGetVisionSensorImage(
             self.clientID, self.bottom_camera_handle, 0, vrep.simx_opmode_streaming)
Example 3
def analysisVideo(vision_sensor_name, client_id, config,
                  handle_video_coordlist):
    res1, visionSensorHandle = vrep.simxGetObjectHandle(
        client_id, vision_sensor_name, vrep.simx_opmode_oneshot_wait)
    # config["vision_sensor_handle"] = visionSensorHandle
    vrep.simxGetVisionSensorImage(client_id, visionSensorHandle, 0,
                                  vrep.simx_opmode_streaming)
    time.sleep(1)
    while (vrep.simxGetConnectionId(client_id) != -1):
        # if visionSensorHandle != config["vision_sensor_handle"]:
        #     continue
        # print "config_handle:", config["vision_sensor_handle"]
        # print "handle:", visionSensorHandle
        res, resolution, image = vrep.simxGetVisionSensorImage(
            client_id, visionSensorHandle, 0, vrep.simx_opmode_buffer)
        if not image or len(resolution) == 0:
            continue
        image_byte_array = array.array('b', image)
        _img = I.frombuffer("RGB", (resolution[0], resolution[1]),
                            image_byte_array, "raw", "RGB", 0, 1)
        im = np.array(_img)[::-1, :, ::-1].copy()
        coordlist = detect(im)  # detect the target object's coordinates
        handle_video_coordlist(coordlist, config)
        # if config["play_video"]:
        cv2.imshow(config["video_label"], im)
        if cv2.waitKey(15) & 0xFF == 27:  # Press ESC to exit :)
            break
    print 'End of Analysis'
Example 4
def streamVisionSensor(visionSensorName, clientID, pause, sample_time):
    #Get the handle of the vision sensor
    res1, visionSensorHandle = vrep.simxGetObjectHandle(
        clientID, visionSensorName, vrep.simx_opmode_oneshot_wait)
    #Get the image
    res2, resolution, image = vrep.simxGetVisionSensorImage(
        clientID, visionSensorHandle, 0, vrep.simx_opmode_streaming)
    #Allow the display to be refreshed
    plt.ion()
    #Initialization of the figure
    time.sleep(0.5)
    res, resolution, image = vrep.simxGetVisionSensorImage(
        clientID, visionSensorHandle, 0, vrep.simx_opmode_buffer)
    print resolution
    im = I.new("RGB", (resolution[0], resolution[1]), "white")
    #Give a title to the figure
    fig = plt.figure(1)
    fig.canvas.set_window_title(visionSensorName)
    #invert the picture
    plotimg = plt.imshow(im, origin='lower')
    #Give V-REP some time to send the first image; otherwise the loop would start with an empty image and crash
    time.sleep(1)
    img = I.new("RGB", (resolution[0], resolution[1]))
    while (vrep.simxGetConnectionId(clientID) != -1):
        start = time.time()
        #Get the image of the vision sensor
        res, resolution, image = vrep.simxGetVisionSensorImage(
            clientID, visionSensorHandle, 0, vrep.simx_opmode_buffer)
        #Transform the image so it can be displayed using pyplot
        image_byte_array = array.array('b', image)
        im = I.frombuffer("RGB", (resolution[0], resolution[1]),
                          image_byte_array, "raw", "RGB", 0, 1)
        #Update the image
        plotimg.set_data(im)
        #Refresh the display
        plt.draw()
        #A pause is required here, otherwise the display will not refresh
        plt.pause(pause)
        #Set the image for naoqi
        imagepixel = []  #*(resolution[0]*resolution[1])
        for i in xrange(resolution[0] * resolution[1]):
            r = i * 3
            g = r + 1
            b = r + 2
            imagepixel.append((image[b], image[g], image[r]))
        img.putdata(imagepixel)
        # print image
        # print imagepixel
        data = img.tostring()
        im = im.rotate(180)
        videoDeviceProxy.putImage(vision_definitions.kTopCamera, resolution[0],
                                  resolution[1], im.tostring())
        end = time.time()
        dt = end - start
        if dt < sample_time:
            time.sleep(sample_time - dt)
        else:
            print "Can't keep a period (%gs>%gs)" % (dt, sample_time)

    print 'End of Simulation'
Example 5
    def __init__(self, clientID, printFlag = False):
        self.clientID = clientID;
        # retrieve motor  handles
        errorCode, self.steer_handle = vrep.simxGetObjectHandle(self.clientID, 'steer_joint', vrep.simx_opmode_oneshot_wait);
        errorCode, self.motor_handle = vrep.simxGetObjectHandle(self.clientID, 'motor_joint', vrep.simx_opmode_oneshot_wait);
        errorCode, self.fl_brake_handle = vrep.simxGetObjectHandle(self.clientID, 'fl_brake_joint', vrep.simx_opmode_oneshot_wait);
        errorCode, self.fr_brake_handle = vrep.simxGetObjectHandle(self.clientID, 'fr_brake_joint', vrep.simx_opmode_oneshot_wait);
        errorCode, self.bl_brake_handle = vrep.simxGetObjectHandle(self.clientID, 'bl_brake_joint', vrep.simx_opmode_oneshot_wait);
        errorCode, self.br_brake_handle = vrep.simxGetObjectHandle(self.clientID, 'br_brake_joint', vrep.simx_opmode_oneshot_wait);
        errorCode, self.camera_f_handle = vrep.simxGetObjectHandle(self.clientID, 'cam_f', vrep.simx_opmode_oneshot_wait);
        
        vrep.simxGetVisionSensorImage(self.clientID, self.camera_f_handle, 0, vrep.simx_opmode_streaming)
        print('Received Handles...');

        self.factor = 30/(2.68*3.6);
        self.max_throttle = 19; # Kmph
        self.max_reverse_throttle = -19; #Kmph
        self.max_steer = 30; # Degrees

        self.printFlag = printFlag;
        
        # Self test the camera
        print('Setting up the camera system...');
        self.lastFrame = None;
        err = 0;
        while(err != 1):
            err, self.lastFrame = self.get_image();
        print('Camera setup successful.')
Example 6
def streamVisionSensor(visionSensorName, clientID, pause=0.0001):
    #Get the handle of the vision sensor
    res1, visionSensorHandle = vrep.simxGetObjectHandle(
        clientID, visionSensorName, vrep.simx_opmode_oneshot_wait)
    #Get the image
    res2, resolution, image = vrep.simxGetVisionSensorImage(
        clientID, visionSensorHandle, 0, vrep.simx_opmode_streaming)
    #Initialization of the figure
    time.sleep(0.5)
    res, resolution, image = vrep.simxGetVisionSensorImage(
        clientID, visionSensorHandle, 0, vrep.simx_opmode_buffer)
    im = I.new("RGB", (resolution[0], resolution[1]), "white")
    # Init figure
    plt.ion()
    fig = plt.figure(1)
    plotimg = plt.imshow(im, origin='lower')
    #Give V-REP some time to send the first image; otherwise the loop would start with an empty image and crash
    time.sleep(1)
    c = 0
    while (vrep.simxGetConnectionId(clientID) != -1):
        #Get the image of the vision sensor
        res, resolution, image = vrep.simxGetVisionSensorImage(
            clientID, visionSensorHandle, 0, vrep.simx_opmode_buffer)
        #Transform the image so it can be displayed
        img = np.array(image, dtype=np.uint8).reshape(
            [resolution[1], resolution[0], 3])
        #Update the image
        fig.canvas.set_window_title(visionSensorName + '_' + str(c))
        plotimg.set_data(img)
        plt.draw()
        plt.pause(pause)
        c += 1
    print('End of Simulation')
Example 7
    def get_image(self):
        # camera 1
        res, resolution, image1 = vrep.simxGetVisionSensorImage(
            self.client_id,
            self.camera_handles[0],
            options=0,
            operationMode=vrep.simx_opmode_blocking)
        image1 = np.array(image1, dtype=np.uint8)
        image1 = np.reshape(image1, [resolution[1], resolution[0], 3])[::-1,
                                                                       ...]

        # camera 2
        res, resolution, image2 = vrep.simxGetVisionSensorImage(
            self.client_id,
            self.camera_handles[1],
            options=0,
            operationMode=vrep.simx_opmode_blocking)
        image2 = np.array(image2, dtype=np.uint8)
        image2 = np.reshape(image2, [resolution[1], resolution[0], 3])[::-1,
                                                                       ...]

        # camera 3
        res, resolution, image3 = vrep.simxGetVisionSensorImage(
            self.client_id,
            self.camera_handles[2],
            options=0,
            operationMode=vrep.simx_opmode_blocking)
        image3 = np.array(image3, dtype=np.uint8)
        image3 = np.reshape(image3, [resolution[1], resolution[0], 3])[::-1,
                                                                       ...]

        resolution = resolution[::-1]
        return resolution, [image1, image2, image3]
Example 8
def streamVisionSensor(visionSensorName,clientID,pause=0.0001):
    #Get the handle of the vision sensor
    res1,visionSensorHandle=vrep.simxGetObjectHandle(clientID,visionSensorName,vrep.simx_opmode_oneshot_wait)
    #Get the image
    res2,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_streaming)
    #Allow the display to be refreshed
    plt.ion()
    #Initialization of the figure
    time.sleep(0.5)
    res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
    im = I.new("RGB", (resolution[0], resolution[1]), "white")
    #Give a title to the figure
    fig = plt.figure(1)    
    fig.canvas.set_window_title(visionSensorName)
    #invert the picture
    plotimg = plt.imshow(im,origin='lower')
    #Give V-REP some time to send the first image; otherwise the loop would start with an empty image and crash
    time.sleep(1)
    while (vrep.simxGetConnectionId(clientID)!=-1): 
        #Get the image of the vision sensor
        res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
        #Transform the image so it can be displayed using pyplot
        image_byte_array = array.array('b',image)
        im = I.frombuffer("RGB", (resolution[0],resolution[1]), image_byte_array, "raw", "RGB", 0, 1)
        #Update the image
        plotimg.set_data(im)
        #Refresh the display
        plt.draw()
        #A pause is required here, otherwise the display will not refresh
        plt.pause(pause)
    print 'End of Simulation'
Example 9
def obtener_camara_handler(clientID):
    """
    Gets a handle for the camera, which can later be used to access the
    laser as well. It also initializes the camera and laser viewers.

    Args:
        clientID: ID of the client that has established a connection with
                  the V-REP server.
    Return:
        Returns the camera handle.
    """
    # Get the handle
    _, camhandle = vrep.simxGetObjectHandle(clientID, 'Vision_sensor',
                                            vrep.simx_opmode_oneshot_wait)

    # Initialize the camera and laser streams
    vrep.simxGetVisionSensorImage(clientID, camhandle, 0,
                                  vrep.simx_opmode_streaming)
    vrep.simxGetStringSignal(clientID, 'LaserData', vrep.simx_opmode_streaming)

    # Wait 1 second for the stream buffer to fill
    time.sleep(1)

    return camhandle
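After obtener_camara_handler has started the stream, later reads would normally use simx_opmode_buffer and turn the flat pixel list into a NumPy image. A possible helper along these lines (the function name is illustrative, not from the original project):

import numpy as np
import vrep

def read_camera_image(clientID, camhandle):
    res, resolution, image = vrep.simxGetVisionSensorImage(
        clientID, camhandle, 0, vrep.simx_opmode_buffer)
    if res != vrep.simx_return_ok or len(resolution) == 0:
        return None  # no image in the buffer yet
    img = np.array(image, dtype=np.uint8)
    img.resize([resolution[1], resolution[0], 3])
    return np.flipud(img)  # V-REP delivers the image bottom-up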
Example 10
def main():
    # Initialize two Vision Sensors. If we don't do this, we can't retrieve images from them.
    vrep.simxGetVisionSensorImage(clientID, line_sensor, 0, vrep.simx_opmode_streaming)
    vrep.simxGetVisionSensorImage(clientID, obstacle_sensor, 0, vrep.simx_opmode_streaming)
    time.sleep(1)

    # PID Parameters
    # Motor Kp Ki Kd
    # 5 0.01 0 0
    # 10 0.0001 0.0005

    pid = pid_controller(1, 0.0001, 0.0005)
    # Set the Power!
    motor(10)

    # Start the main loop.
    while vrep.simxGetConnectionId(clientID) != -1:
        raw = get_line_image()
        # Crop the line image.
        raw = raw[128:192, :]
        # Find the desired moment.
        ret = get_moment(raw)
        if ret is None:
            continue

        cx, cy, img = ret

        # Calculate the angle we want to steer.
        angle = math.atan((128 - cx) / cy)
        steer(pid(angle))

        cv2.imshow("result", img)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
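The helpers used above (get_line_image, get_moment, motor, steer, pid_controller) belong to that project and are not shown here. Purely as an illustration of the call pattern pid = pid_controller(kp, ki, kd); steer(pid(angle)), a PID controller could be sketched as a closure like the following (hypothetical, not the original implementation):

import time

def pid_controller(kp, ki, kd):
    """Return a callable PID controller over an error signal."""
    state = {'integral': 0.0, 'prev_error': 0.0, 'prev_time': None}

    def control(error):
        now = time.time()
        dt = 0.0 if state['prev_time'] is None else now - state['prev_time']
        state['integral'] += error * dt
        derivative = 0.0 if dt == 0.0 else (error - state['prev_error']) / dt
        state['prev_error'] = error
        state['prev_time'] = now
        return kp * error + ki * state['integral'] + kd * derivative

    return control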
Example 11
def getVisionSensor(visionSensorName,clientID,sample_time):
    #Get the handle of the vision sensor
    res1,visionSensorHandle=vrep.simxGetObjectHandle(clientID,visionSensorName,vrep.simx_opmode_oneshot_wait)
    #Get the image
    res2,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_streaming)
    time.sleep(0.5)
    res2,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
    print resolution
    img = I.new("RGB", (resolution[0], resolution[1]))
    while (vrep.simxGetConnectionId(clientID)!=-1): 
        start = time.time()
        #Get the image of the vision sensor
        res,resolution,image = vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)

        image_byte_array = array.array('b',image)
        im = I.frombuffer("RGB", (resolution[0],resolution[1]), image_byte_array, "raw", "RGB", 0, 1)

        # #Set the image for naoqi
        # imagepixel = []#*(resolution[0]*resolution[1])
        # for i in xrange(resolution[0] * resolution[1]):
        #     r = i*3
        #     g = r+1
        #     b = r+2
        #     imagepixel.append((image[b], image[g], image[r]))
        # img.putdata(imagepixel)
        # print image
        # print imagepixel
        videoDeviceProxy.putImage(vision_definitions.kTopCamera, resolution[0], resolution[1], im.rotate(180).tostring())
        end = time.time()
        dt = end-start
        # if dt < sample_time:
        #     time.sleep(sample_time - dt)
        # else:
        #     print "Can't keep a period (%gs>%gs)" % (dt,sample_time)
    print 'End of Simulation'
Example 12
def getDroneImage(clientID, height):
    position = np.asarray([0, 0, height])

    # Get Drone Handle
    errorCode, droneHandle = vrep.simxGetObjectHandle(
        clientID, 'drone', vrep.simx_opmode_oneshot_wait)

    # Set Drone Height (in meters)
    vrep.simxSetObjectPosition(clientID, droneHandle, -1, position,
                               vrep.simx_opmode_oneshot_wait)

    # Get Drone Camera Image (First Call)
    errorDrone, resolution, droneImage = vrep.simxGetVisionSensorImage(
        clientID, droneHandle, 0, vrep.simx_opmode_streaming)
    count = 0
    while (vrep.simxGetConnectionId(clientID) != -1):

        # Read in drone camera image (Note operating mode changed to buffer)
        errorDrone, resolution, droneImage = vrep.simxGetVisionSensorImage(
            clientID, droneHandle, 0, vrep.simx_opmode_buffer)
        count += 1

        # stop once a valid image has been read (the first reads may be empty)
        if (errorDrone == vrep.simx_return_ok and count >= 1):
            droneImage = np.array(droneImage, dtype=np.uint8)
            droneImage.resize([resolution[1], resolution[0], 3])
            break
    return resolution, droneImage
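A possible way to call getDroneImage once a connection is open, saving the frame with matplotlib; the port number and file name are placeholders:

import matplotlib.pyplot as plt
import numpy as np
import vrep

vrep.simxFinish(-1)
clientID = vrep.simxStart('127.0.0.1', 19999, True, True, 5000, 5)
if clientID != -1:
    resolution, droneImage = getDroneImage(clientID, height=5.0)
    plt.imsave('drone_view.png', np.flipud(droneImage))  # flip because V-REP images are bottom-up
    vrep.simxFinish(clientID)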
Example 13
def streamVisionSensor(visionSensorName,clientID,pause=0.0001):
    #Get the handle of the vision sensor
    res1,visionSensorHandle=vrep.simxGetObjectHandle(clientID,visionSensorName,vrep.simx_opmode_oneshot_wait)
    #Get the image
    res2,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_streaming)
    #Allow the display to be refreshed
    plt.ion()
    #Initialization of the figure
    time.sleep(0.5)
    res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
    im = I.new("RGB", (resolution[0], resolution[1]), "white")
    #Give a title to the figure
    fig = plt.figure(1)
    fig.canvas.set_window_title(visionSensorName)
    #invert the picture
    plotimg = plt.imshow(im,origin='lower')
    #Give V-REP some time to send the first image; otherwise the loop would start with an empty image and crash
    time.sleep(1)
    while (vrep.simxGetConnectionId(clientID)!=-1):
        #Get the image of the vision sensor
        res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
        #Transform the image so it can be displayed using pyplot
        image_byte_array = array.array('b',image)
        im = I.frombuffer("RGB", (resolution[0],resolution[1]), image_byte_array, "raw", "RGB", 0, 1)
        #Update the image
        plotimg.set_data(im)
        #Refresh the display
        plt.draw()
        #A pause is required here, otherwise the display will not refresh
        plt.pause(pause)
    print 'End of Simulation'
Example 14
def startDataStreaming():
    if (not clientID):
        print('connect first')
    elif (clientID[0] == -1):
        print('connect first')
    else:
        #Position
        for i in Objects_handle:
            vrep.simxGetObjectPosition(clientID[0], i, UR3_handle[0],
                                       vrep.simx_opmode_streaming)

        #Dummy position
        vrep.simxGetObjectPosition(clientID[0], end_effector[0], UR3_handle[0],
                                   vrep.simx_opmode_streaming)
        #vision
        for i in vision_sensor:
            vrep.simxGetVisionSensorImage(clientID[0], i, 0,
                                          vrep.simx_opmode_streaming)
        #collision
        for i in collision_handle:
            vrep.simxReadCollision(clientID[0], i, vrep.simx_opmode_streaming)
        #UR3 Joint
        for i in UR3_joint_handle:
            vrep.simxGetJointPosition(clientID[0], i,
                                      vrep.simx_opmode_streaming)
Example 15
        def initialize_vrep_cameras(top_camera_name, bottom_camera_name,
                                    clientID):
            #Top camera
            #Get the handle of the top vision sensor
            res1, top_camera_handle = vrep.simxGetObjectHandle(
                clientID, top_camera_name, vrep.simx_opmode_oneshot_wait)
            if (res1 != 0):
                raise RuntimeError("Could not get top camera handle. res = " +
                                   str(res1))
            #Get the handle of the bottom vision sensor
            res1, bottom_camera_handle = vrep.simxGetObjectHandle(
                clientID, bottom_camera_name, vrep.simx_opmode_oneshot_wait)
            if (res1 != 0):
                raise RuntimeError(
                    "Could not get bottom camera handle. res = " + str(res1))

            #Initialize top camera stream
            _, _, _ = vrep.simxGetVisionSensorImage(clientID,
                                                    top_camera_handle, 0,
                                                    vrep.simx_opmode_streaming)

            #Initialize bottom camera stream
            _, _, _ = vrep.simxGetVisionSensorImage(clientID,
                                                    bottom_camera_handle, 0,
                                                    vrep.simx_opmode_streaming)
            time.sleep(0.5)
            return top_camera_handle, bottom_camera_handle
Example 16
def main():

    vrep.simxFinish(-1)  # just in case, close all opened connections
    clientID = vrep.simxStart('127.0.0.1', 20000, True, True, 1300,
                              5)  # Connect to V-REP
    if clientID == -1:
        sys.exit()
    print('Connected to remote API server')

    res, camhandle = vrep.simxGetObjectHandle(clientID, 'camara_1',
                                              vrep.simx_opmode_oneshot_wait)
    print(res)
    res, resolution, image = vrep.simxGetVisionSensorImage(
        clientID, camhandle, 0, vrep.simx_opmode_streaming)

    ##############

    args = cli()

    # load model
    model, _ = nets.factory_from_args(args)
    model = model.to(args.device)
    processor = decoder.factory_from_args(args, model)

    visualizer = None
    while True:
        res, resolution, image = vrep.simxGetVisionSensorImage(
            clientID, camhandle, 0, vrep.simx_opmode_buffer)
        if len(image) == 0:
            continue
        img = np.array(image, dtype=np.uint8)
        img.resize([resolution[1], resolution[0], 3])
        img = np.rot90(img, 2)
        img = np.fliplr(img)
        cv2.imshow('t', img)
        cv2.waitKey(1)
        image = cv2.resize(img, None, fx=args.scale, fy=args.scale)
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        if visualizer is None:
            visualizer = Visualizer(processor, args)(image)
            visualizer.send(None)

        start = time.time()
        image_pil = PIL.Image.fromarray(image)
        processed_image_cpu, _, __ = transforms.EVAL_TRANSFORM(
            image_pil, [], None)
        processed_image = processed_image_cpu.contiguous().to(
            args.device, non_blocking=True)
        #print('preprocessing time', time.time() - start)

        fields = processor.fields(torch.unsqueeze(processed_image, 0))[0]
        visualizer.send((image, fields))

        #print('loop time = {:.3}s, FPS = {:.3}'.format(
        #    time.time() - last_loop, 1.0 / (time.time() - last_loop)))
        last_loop = time.time()

    vrep.simxFinish(clientID)
Example 17
def streamVisionSensor(visionSensorName, clientID, pause=0.0001):
    # enable the synchronous mode on the client:
    vrep.simxSynchronous(clientID, 1)

    # start the simulation:
    vrep.simxStartSimulation(clientID, vrep.simx_opmode_blocking)

    # enable streaming of the iteration counter:
    res, iteration1 = vrep.simxGetIntegerSignal(clientID, "iteration",
                                                vrep.simx_opmode_streaming)
    print("iteration1: ", iteration1)
    #Get the handle of the vision sensor
    res1, visionSensorHandle = vrep.simxGetObjectHandle(
        clientID, visionSensorName, vrep.simx_opmode_oneshot_wait)
    #Get the image
    res2, resolution, image = vrep.simxGetVisionSensorImage(
        clientID, visionSensorHandle, 0, vrep.simx_opmode_streaming)
    #Initialization of the figure
    time.sleep(0.5)
    res, resolution, image = vrep.simxGetVisionSensorImage(
        clientID, visionSensorHandle, 0, vrep.simx_opmode_buffer)
    im = I.new("RGB", (resolution[0], resolution[1]), "white")
    # Init figure
    plt.ion()
    fig = plt.figure(1)
    plotimg = plt.imshow(im, origin='lower')
    #Give V-REP some time to send the first image; otherwise the loop would start with an empty image and crash
    time.sleep(1)
    while (vrep.simxGetConnectionId(clientID) != -1):
        if iteration1 < 30:
            vrep.simxSynchronousTrigger(clientID)
            res, iteration1 = vrep.simxGetIntegerSignal(
                clientID, "iteration", vrep.simx_opmode_buffer)
            if res != vrep.simx_return_ok: iteration1 = -1
            iteration2 = iteration1
            while iteration2 == iteration1:  # wait until the iteration counter has changed
                res, iteration2 = vrep.simxGetIntegerSignal(
                    clientID, "iteration", vrep.simx_opmode_buffer)
                if res != vrep.simx_return_ok:
                    iteration2 = -1
            print(iteration2)

            #Get the image of the vision sensor
            res, resolution, image = vrep.simxGetVisionSensorImage(
                clientID, visionSensorHandle, 0, vrep.simx_opmode_buffer)
            #Transform the image so it can be displayed
            img = np.array(image, dtype=np.uint8).reshape(
                [resolution[1], resolution[0], 3])
            #Update the image
            fig.canvas.set_window_title(visionSensorName + '_' +
                                        str(iteration2))
            plotimg.set_data(img)
            plt.draw()
            plt.pause(pause)
        # vrep.simxSynchronousTrigger(clientID)
    print('End of Simulation')
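The synchronous-mode handshake used in Example 17 (simxSynchronous plus simxSynchronousTrigger) can be reduced to a small skeleton; this is a sketch of the general pattern, not part of the original example:

import vrep

def run_synchronous_steps(clientID, steps):
    vrep.simxSynchronous(clientID, True)                        # server waits for explicit triggers
    vrep.simxStartSimulation(clientID, vrep.simx_opmode_blocking)
    for _ in range(steps):
        vrep.simxSynchronousTrigger(clientID)                   # advance the simulation by one step
        vrep.simxGetPingTime(clientID)                          # block until that step has completed
    vrep.simxStopSimulation(clientID, vrep.simx_opmode_blocking)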
Example 18
 def setup(self):
     if self.clientID != -1:
         errorCode, handles, intData, floatData, array = vrep.simxGetObjectGroupData(self.clientID,
                                                                                     vrep.sim_appobj_object_type,
                                                                                     0,
                                                                                     vrep.simx_opmode_oneshot_wait)
         data = dict(zip(array, handles))
         self.camera = [value for key, value in data.items() if "Vision_sensor" in key][0]
         self.target = [value for key, value in data.items() if "Quadricopter_target" == key][0]
         vrep.simxGetVisionSensorImage(self.clientID, self.camera, 1, vrep.simx_opmode_streaming)
Example 19
def streamVisionSensor(visionSensorName,clientID,pause=0.0001):

  nn = newNeuralNetwork()
  #print("Neural Network num_inputs:{n}".format(n=nn.num_inputs))

  #Get the handle of the vision sensor
  res1,visionSensorHandle=vrep.simxGetObjectHandle(clientID,visionSensorName,vrep.simx_opmode_oneshot_wait)
  #Get the image
  res2,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_streaming)
  #Allow the display to be refreshed
  plt.ion()
  #Initialization of the figure
  time.sleep(0.5)
  res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
  im = I.new("RGB", (resolution[0], resolution[1]), "white")
  #Give a title to the figure
  im = im.crop((160, 0, 480, 480))
  fig = plt.figure(1)    
  fig.canvas.set_window_title(visionSensorName)
  #invert the picture
  plotimg = plt.imshow(im,origin='lower')
  #Give V-REP some time to send the first image; otherwise the loop would start with an empty image and crash
  time.sleep(1)
  
  count = COUNTER
  while (vrep.simxGetConnectionId(clientID)!=-1):
    #Get the image of the vision sensor
    res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
    #Transform the image so it can be displayed using pyplot
    image_byte_array = array.array('b',image)
    im = I.frombuffer("RGB", (resolution[0],resolution[1]), image_byte_array, "raw", "RGB", 0, 1)
    im = im.crop((160, 0, 480, 480))
    #Update the image

    plotimg.set_data(im)
    #Refresh the display
    plt.axis("off")
    plt.show()
    #A pause is required here, otherwise the display will not refresh
    plt.pause(pause)
    if (nn):
      # feed the neural network with the image and get its prediction (whether it contains a plant)

      trainer = Trainer("live", imageIntoFeatures(im))
      answer = nn.predict(trainer.inputs)
      print(answer)
      #assertAnswer(answer)
      #assertAnswerStrong(answer)

    if (SAVE_VISION and count % 60 == 0):
      name = str(count//60) + ".png"
      print("Saving " + name)
      im.save(name)
    count += 1
  print('End of Simulation')
Example 20
def pdControl(clientID, youBotCam, goal):
    print("begin PD control")

    wheelJoints = move.getWheelJoints(clientID)
    err, res, image = vrep.simxGetVisionSensorImage(clientID, youBotCam, 0,
                                                    vrep.simx_opmode_buffer)
    cv2Image = colorDet.convertToCv2Format(image, res)

    #cv2.imshow("Current Image of youBot", cv2Image)
    #cv2.waitKey(0)

    # initialize variables for while loop
    cor = getRedBlobPicture(cv2Image)
    if len(cor) == 0:
        cor = getBlueBlobPicture(cv2Image)
    preCor = cor
    rotVel = 0.0
    forwBackVel = 999  # just a random number to initialize while loop
    leftRightVel = 999  # just a random number to initialize while loop
    while not velOk(forwBackVel, leftRightVel):
        # retrieve new coordinate of red blob
        err, res, image = vrep.simxGetVisionSensorImage(
            clientID, youBotCam, 0, vrep.simx_opmode_buffer)
        cv2Image = colorDet.convertToCv2Format(image, res)
        cor = getRedBlobPicture(cv2Image)

        if len(cor) == 0:
            cor = getBlueBlobPicture(cv2Image)
        # calculate new velocities
        forwBackVel = 0.1 * (cor[0][0] - goal[0]) - 0.002 * (cor[0][0] -
                                                             preCor[0][0])
        leftRightVel = 0.08 * (cor[0][1] - goal[1]) - 0.001 * (cor[0][1] -
                                                               preCor[0][1])

        # save current coordinates as previous coordinates
        preCor = cor

        # update velocities
        move.setWheelVelocity(clientID, 0.0)

        vrep.simxPauseCommunication(clientID, True)
        for i in range(0, 4):
            vrep.simxSetJointTargetVelocity(
                clientID, wheelJoints[i],
                move.wheelVel(forwBackVel / 10.0, leftRightVel / 10.0,
                              rotVel)[i], vrep.simx_opmode_oneshot)
        vrep.simxPauseCommunication(clientID, False)

    # stop moving
    move.setWheelVelocity(clientID, 0.0)

    #cv2.imshow("Current Image of youBot", cv2Image)
    #cv2.waitKey(0)

    print("end PD control")
Example 21
def collectImageData(clientID, action_arg):

    list_of_images = []

    collector = []
    if clientID!=-1:
        res,v0=vrep.simxGetObjectHandle(clientID,'Vision_sensor',vrep.simx_opmode_oneshot_wait)
        res,v1=vrep.simxGetObjectHandle(clientID,'PassiveVision_sensor',vrep.simx_opmode_oneshot_wait)
        ret_code, left_handle = vrep.simxGetObjectHandle(clientID,'DynamicLeftJoint', vrep.simx_opmode_oneshot_wait)
        ret_code, right_handle = vrep.simxGetObjectHandle(clientID,'DynamicRightJoint', vrep.simx_opmode_oneshot_wait)
        ret_code, base_handle = vrep.simxGetObjectHandle(clientID, 'LineTracerBase', vrep.simx_opmode_oneshot_wait)

        res,resolution,image=vrep.simxGetVisionSensorImage(clientID,v0,0,vrep.simx_opmode_streaming)
        ret_code, euler_angles = vrep.simxGetObjectOrientation(clientID, base_handle, -1, vrep.simx_opmode_streaming)
        count = 0
        action = 2
        inference_counter = 0
        steps = 50
        action_length = 0

        while (vrep.simxGetConnectionId(clientID)!=-1 and count < steps):

            tim = time.time()
            for i in range(10):
                vrep.simxSynchronousTrigger(clientID)

            res,resolution,image=vrep.simxGetVisionSensorImage(clientID,v0,0,vrep.simx_opmode_buffer)

            if res==vrep.simx_return_ok:

                img = np.array(image,dtype=np.uint8)
                img.resize([resolution[1],resolution[0],3])
                rotate_img = img.copy()
                rotate_img = np.flipud(img)
                list_of_images.append(rotate_img)

                ret_code, pos = vrep.simxGetObjectPosition(clientID, base_handle, -1, vrep.simx_opmode_oneshot)
                ret_code, velo, angle_velo = vrep.simxGetObjectVelocity(clientID, base_handle, vrep.simx_opmode_oneshot)
                ret_code, euler_angles = vrep.simxGetObjectOrientation(clientID, base_handle, -1, vrep.simx_opmode_buffer)
                collector.append([pos[0], pos[1], pos[2], velo[0], velo[1], velo[2], euler_angles[0], euler_angles[1], euler_angles[2], action])
                if action_length < 1 and action_arg == -1:
                    action_length = random.randint(2,10)
                    action = np.random.randint(0, 5)
                elif action_arg > -1:
                    action = action_arg
                action_length = action_length - 1
            velo = (action -2)  * 15

            return_val = vrep.simxSetJointTargetVelocity(clientID, left_handle, velo, vrep.simx_opmode_oneshot)
            return_val2 = vrep.simxSetJointTargetVelocity(clientID, right_handle, velo, vrep.simx_opmode_oneshot_wait)
            count+=1
        return list_of_images, collector
    else:
        sys.exit()
Example 22
def getVisionSensor(visionSensorName,clientID):
    #Get the handle of the vision sensor
    res1,visionSensorHandle=vrep.simxGetObjectHandle(clientID,visionSensorName,vrep.simx_opmode_oneshot_wait)
    #Get the image
    res2,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_streaming)
    time.sleep(1)
    while (vrep.simxGetConnectionId(clientID)!=-1):
        #Get the image of the vision sensor
        res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
        print resolution
    print 'End of Simulation'
Example 23
def getVisionSensor(visionSensorName,clientID):
    #Get the handle of the vision sensor
    res1,visionSensorHandle=vrep.simxGetObjectHandle(clientID,visionSensorName,vrep.simx_opmode_oneshot_wait)
    #Get the image
    res2,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_streaming)
    time.sleep(1)
    while (vrep.simxGetConnectionId(clientID)!=-1): 
        #Get the image of the vision sensor
        res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
        print resolution
    print 'End of Simulation'
Example 24
def streamVisionSensor(visionSensorName,clientID,pause,sample_time):
    #Get the handle of the vision sensor
    res1,visionSensorHandle=vrep.simxGetObjectHandle(clientID,visionSensorName,vrep.simx_opmode_oneshot_wait)
    #Get the image
    res2,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_streaming)
    #Allow the display to be refreshed
    plt.ion()
    #Initialization of the figure
    time.sleep(0.5)
    res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
    print resolution
    im = I.new("RGB", (resolution[0], resolution[1]), "white")
    #Give a title to the figure
    fig = plt.figure(1)    
    fig.canvas.set_window_title(visionSensorName)
    #invert the picture
    plotimg = plt.imshow(im,origin='lower')
    #Give V-REP some time to send the first image; otherwise the loop would start with an empty image and crash
    time.sleep(1)
    img = I.new("RGB", (resolution[0], resolution[1]))
    while (vrep.simxGetConnectionId(clientID)!=-1): 
        start = time.time()
        #Get the image of the vision sensor
        res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
        #Transform the image so it can be displayed using pyplot
        image_byte_array = array.array('b',image)
        im = I.frombuffer("RGB", (resolution[0],resolution[1]), image_byte_array, "raw", "RGB", 0, 1)
        #Update the image
        plotimg.set_data(im)
        #Refresh the display
        plt.draw()
        #A pause is required here, otherwise the display will not refresh
        plt.pause(pause)
        #Set the image for naoqi
        imagepixel = []#*(resolution[0]*resolution[1])
        for i in xrange(resolution[0] * resolution[1]):
            r = i*3
            g = r+1
            b = r+2
            imagepixel.append((image[b], image[g], image[r]))
        img.putdata(imagepixel)
        # print image
        # print imagepixel
        data = img.tostring()
        im = im.rotate(180)
        videoDeviceProxy.putImage(vision_definitions.kTopCamera, resolution[0], resolution[1], im.tostring())
        end = time.time()
        dt = end-start
        if dt < sample_time:
            time.sleep(sample_time - dt)
        else:
            print "Can't keep a period (%gs>%gs)" % (dt,sample_time)

    print 'End of Simulation'
Example 25
	def read_vision_sensor(self):
		"""
			Reads the image raw data from vrep vision sensor.
			Returns:
				resolution: Tuple with the image resolution.
				image: List with the image data.
		"""
		res, resolution, image = vrep.simxGetVisionSensorImage(self.clientID, self.vision_handle, 0, vrep.simx_opmode_streaming)
		while(res != vrep.simx_return_ok):
			res, resolution, image = vrep.simxGetVisionSensorImage(self.clientID, self.vision_handle, 0, vrep.simx_opmode_buffer)

		return resolution, image
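read_vision_sensor returns the image as a flat list of signed bytes; a caller would typically reshape it into an RGB array along these lines (a sketch, assuming NumPy and an instance named robot that exposes the method above):

import numpy as np

resolution, image = robot.read_vision_sensor()  # 'robot' is a hypothetical instance of the class above
frame = np.array(image, dtype=np.uint8).reshape(resolution[1], resolution[0], 3)
frame = np.flipud(frame)  # V-REP images are delivered bottom-up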
Example 26
def getDepthImage(clientID):
    imageCount = 0  # to keep track of how many images have been processed
    processLimit = 5

    #    print ('Vision Sensor object handling')
    errorCode, rgbCamHandle = vrep.simxGetObjectHandle(
        clientID, 'kinect_rgb', vrep.simx_opmode_oneshot_wait)
    errorCode, xyzCamHandle = vrep.simxGetObjectHandle(
        clientID, 'kinect_pointcloud', vrep.simx_opmode_oneshot_wait)

    #    print ('Setting up First Calls for Reading Images and Sensor Values')
    # Note that the image stream takes a while to start, so the first image
    # cannot be read immediately; a streaming buffer has to be set up first
    errorRGB, resolution, rgbImage = vrep.simxGetVisionSensorImage(
        clientID, rgbCamHandle, 0, vrep.simx_opmode_streaming)

    # api return value for the coordinate extraction filter in camera is as follows:
    # (1) point count along x, (2) point count along Y, (3) point x1, (4) point y1,
    # (5) point z1, (6) distance point1, (7) point x2, etc.
    # and auxPacket[1] holds  data in the format described above
    errorXYZ, detectionState, auxPackets = vrep.simxReadVisionSensor(
        clientID, xyzCamHandle, vrep.simx_opmode_streaming)

    # loop while connected to simulation
    while (vrep.simxGetConnectionId(clientID) != -1):

        # Read in camera information (Note operating mode changed to buffer)
        errorRGB, resolution, rgbImage = vrep.simxGetVisionSensorImage(
            clientID, rgbCamHandle, 0, vrep.simx_opmode_buffer)
        errorXYZ, detectionState, auxPackets = vrep.simxReadVisionSensor(
            clientID, xyzCamHandle, vrep.simx_opmode_buffer)

        errorCodes = [errorRGB, errorXYZ]

        # Check if have both image and xyz coordinate data available
        if (all(errors == vrep.simx_return_ok for errors in errorCodes)):
            # print ("image OK!!!")
            imageCount += 1

        elif (all(errors == vrep.simx_return_novalue_flag
                  for errors in errorCodes)):
            # print ("no image yet")
            pass
        else:
            # print (errorCodes)
            pass

        # exit if processed more than certain amount of images
        if imageCount > processLimit:
            break

    return auxPackets, resolution, rgbImage
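Following the packet layout described in the comment above (point counts along x and y, then x, y, z and distance for each point), the auxiliary data could be unpacked roughly as follows; this is a sketch based on that comment, not code from the original project:

def unpack_xyz_packet(auxPackets):
    """Split the coordinate-extraction packet into (x, y, z, distance) tuples."""
    packet = auxPackets[1]
    count_x, count_y = int(packet[0]), int(packet[1])
    values = packet[2:]
    points = []
    for i in range(count_x * count_y):
        x, y, z, dist = values[4 * i:4 * i + 4]
        points.append((x, y, z, dist))
    return points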
Example 27
def renderSensorImage(clientID, camera, fname):
    errRender, resolution, image = vrep.simxGetVisionSensorImage(
        clientID, camera, 0, vrep.simx_opmode_streaming)
    errRender, resolution, image = vrep.simxGetVisionSensorImage(
        clientID, camera, 0, vrep.simx_opmode_buffer)

    if errRender == vrep.simx_return_ok:
        img = np.array(image, dtype=np.uint8)
        img.resize([resolution[0], resolution[1], 3])
        img = cv2.flip(img, 0)
        img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        cv2.imwrite(fname, img)
    pass
Example 28
    def getVisionSensor(self):
        _, resolution, image = vrep.simxGetVisionSensorImage(
            self.clientID, self.visionSensor, 0, vrep.simx_opmode_streaming)
        time.sleep(0.1)
        _, resolution, image = vrep.simxGetVisionSensorImage(
            self.clientID, self.visionSensor, 0, vrep.simx_opmode_buffer)

        sensorImage = np.array(image, dtype=np.uint8)
        image_1 = sensorImage.flatten()
        sensorImage.resize([resolution[0], resolution[1], 3])

        mpl.imshow(sensorImage, origin='lower')
        return image_1, sensorImage
Example 29
 def getImage(self, vision_handle_number):
     if self.synchronous:
         err, resolution, image = vrep.simxGetVisionSensorImage(self.clientID, self.vision_handles[vision_handle_number], 0, vrep.simx_opmode_blocking)
         while err != vrep.simx_return_ok:
             self.step_forward()
             err, resolution, image = vrep.simxGetVisionSensorImage(self.clientID, self.vision_handles[vision_handle_number], 0, vrep.simx_opmode_blocking)
     else:
         err, resolution, image = vrep.simxGetVisionSensorImage(self.clientID, self.vision_handles[vision_handle_number], 0, vrep.simx_opmode_streaming)
         while err != vrep.simx_return_ok:
             err, resolution, image = vrep.simxGetVisionSensorImage(self.clientID, self.vision_handles[vision_handle_number], 0, vrep.simx_opmode_buffer)
     print("Image OK!")
     img = np.array(image,dtype=np.uint8)
     img.resize([resolution[1],resolution[0],3])
     return img
Example 30
    def get_camera_image(self):
        data = {
            'res': -1,
            'err_list': ["camera isn't connected to client"],
            'data': {
                'image': np.zeros((128, 128, 3)),
                'resolution': (128, 128, 3)
            }
        }
        if self.components['camera']['id'] != None:
            res, resolution, image = vrep.simxGetVisionSensorImage(
                self.client_id, self.components['camera']['id'], 0,
                vrep.simx_opmode_streaming)
            data['res'] = res
            data['err_list'] = err_list = parse_error(res)
            print resolution
            if res not in (0, 1) and self.debug:
                err_print(prefix='CAMERA', message=err_list)
            elif len(resolution) > 1:
                resolution = (resolution[0], resolution[1], 3)
                data['data']['resolution'] = resolution
                mat_image = np.array(image, dtype=np.uint8)
                mat_image.resize([resolution[0], resolution[1], 3])

                data['data']['image'] = mat_image[::-1]
        return data
Example 31
    def get_camera_data(self):
        """
            Read image data from V-REP and convert it into numpy arrays.
        """
        # Get color image from simulation
        res, resolution, raw_image = vrep.simxGetVisionSensorImage(
            self.clientID, self.kinectRGB_handle, 0,
            vrep.simx_opmode_oneshot_wait)
        self._error_catch(res)
        color_img = np.array(raw_image, dtype=np.uint8)
        color_img.shape = (resolution[1], resolution[0], 3)
        color_img = color_img.astype(np.float) / 255
        color_img[color_img < 0] += 1
        color_img *= 255
        color_img = np.fliplr(color_img)
        color_img = color_img.astype(np.uint8)

        # Get depth image from simulation
        res, resolution, depth_buffer = vrep.simxGetVisionSensorDepthBuffer(
            self.clientID, self.kinectDepth_handle,
            vrep.simx_opmode_oneshot_wait)
        self._error_catch(res)
        depth_img = np.array(depth_buffer)
        depth_img.shape = (resolution[1], resolution[0])
        depth_img = np.fliplr(depth_img)
        # zNear = 0.01
        # zFar = 10
        # depth_img = depth_img * (zFar - zNear) + zNear
        depth_img[depth_img < 0] = 0
        depth_img[depth_img > 1] = 1
        depth_img = depth_img * self.INT16
        return depth_img, color_img
Example 32
    def __init__(self):

        vrep.simxFinish(-1)
        self.clientID = vrep.simxStart('127.0.0.1', 19997, True, True, 5000, 5)

        # start simulation
        rc = vrep.simxStartSimulation(self.clientID,
                                      vrep.simx_opmode_oneshot_wait)

        # object handles
        res, self.quad_obj = vrep.simxGetObjectHandle(
            self.clientID, 'Quadricopter', vrep.simx_opmode_oneshot_wait)
        res, self.camera = vrep.simxGetObjectHandle(
            self.clientID, 'Vision_sensor', vrep.simx_opmode_oneshot_wait)

        # Initialise data streaming from V-REP
        err, resolution, image = vrep.simxGetVisionSensorImage(
            self.clientID, self.camera, 0, vrep.simx_opmode_streaming)
        _, pos = vrep.simxGetObjectPosition(self.clientID, self.quad_obj, -1,
                                            vrep.simx_opmode_streaming)
        time.sleep(2)

        # Variables
        _, self.last_pos = vrep.simxGetObjectPosition(self.clientID,
                                                      self.quad_obj, -1,
                                                      vrep.simx_opmode_buffer)
Example 33
 def _vrep_get_vision_sensor_image(self,
                                   camera_handle,
                                   opmode=vrep.simx_opmode_buffer,
                                   a=0):
     return vrep.unwrap_vrep(
         vrep.simxGetVisionSensorImage(self._clientID, camera_handle, a,
                                       opmode))
Example 34
def dvs_vision(cid, handle, height=32, width=32):
    err, res, image = vrep.simxGetVisionSensorImage(cid, handle, 1, vrep_mode)
    # Return zeroes if nothing is in the input buffer
    if err == 1:
        return np.zeros(height * width)
    #return np.array(image).ravel()==0
    return (np.array(image).ravel()==0) + (np.array(image).ravel()==-1)
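dvs_vision passes 1 as the options argument, which requests a greyscale image (one byte per pixel) instead of the RGB triplets used in most of the other examples. A small sketch contrasting the two calls, assuming a streaming call was issued earlier:

import numpy as np
import vrep

def grab_frame(cid, handle, greyscale=False):
    options = 1 if greyscale else 0
    err, res, image = vrep.simxGetVisionSensorImage(
        cid, handle, options, vrep.simx_opmode_buffer)
    if err != vrep.simx_return_ok:
        return None
    if greyscale:
        return np.array(image, dtype=np.uint8).reshape(res[1], res[0])
    return np.array(image, dtype=np.uint8).reshape(res[1], res[0], 3)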
Example 35
def findAllBlobs(clientId, youBotCam, homoMatrix):

    currentDegree = 0
    blobList = []
    #we will rotate by 180 degrees to spot the blobs
    i = 0
    while (i < 4):
        i += 1

        err, res, image = vrep.simxGetVisionSensorImage(
            clientId, youBotCam, 0, vrep.simx_opmode_buffer)

        if err == vrep.simx_return_ok:
            # do some image stuff ----------------------------------------------------------------------------------

            cv2Image = convertToCv2Format(image, res)

            tempBlobs = getBlobsGlobal(cv2Image, homoMatrix, clientId)

            count = 0
            for tb in tempBlobs:
                count = 0
                for b in blobList:
                    if move.isSamePoint(tb[0], b[0]) and tb[1] == b[1]:
                        count = count + 1

                if count == 0:
                    print("added", tb, " to blobList")
                    blobList.append(tb)

        move.rotate(10, clientId, True)

    return blobList
Example 36
def exercise4_action(clientID, youBotCam):
    counter = 1

    while counter < 5:
        # get further images from vision sensor
        err, res, image = vrep.simxGetVisionSensorImage(
            clientID, youBotCam, 0, vrep.simx_opmode_buffer)

        if err == vrep.simx_return_ok:
            # do some image stuff ----------------------------------------------------------------------------------

            cv2Image = convertToCv2Format(image, res)
            cv2ImageCopy = cv2Image

            imageContours = getContours(cv2Image, boundariesBlue)

            cv2.drawContours(cv2ImageCopy, imageContours, 0, (255, 0, 0), 1)
            cv2.imshow("Current Image of youBot", cv2ImageCopy)

            # calculate center of green blob
            x, y = calcCenter(imageContours[0])

            print("Current center of the green blob: x={} y={}".format(x, y))

            cv2.waitKey(0)  # key to get out of waiting is ESC

            # end some image stuff ---------------------------------------------------------------------------------

        move.forward(0.1, clientID)

        counter += 1
Example 37
def get_H_matrix(chessboard_corners, youBotCam, clientID):
    print("Begin calculation of H-matrix, please wait ...")
    err, res, image = vrep.simxGetVisionSensorImage(clientID, youBotCam, 0,
                                                    vrep.simx_opmode_buffer)
    image = convertToCv2Format(image, res)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    found, prime_corners = cv2.findChessboardCorners(image, (3, 4))

    prime_corners = addOne(prime_corners)

    # convert all global corners of the chessboard in egocentric world space
    ego_corners = []
    for gc in chessboard_corners:
        newCorner = globalToEgocentric(gc, clientID)
        ego_corners.append(newCorner)

    # add a 1 in every row (globalToEgocentric only returns x,y coordinates)
    ego_corners = addOne(ego_corners)

    # convert ego_corners in numpy array
    ego_corners = np.asarray(ego_corners)

    # calculate H-matrix
    A = getA(prime_corners, ego_corners)
    return getH(A)
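As an alternative to assembling the DLT system by hand with getA/getH, OpenCV can estimate the same homography directly from the two point sets; a sketch using the 2-D points before the homogeneous 1 is appended (illustrative only):

import cv2
import numpy as np

def estimate_homography(prime_corners_2d, ego_corners_2d):
    """Estimate the homography that maps image corners onto egocentric corners."""
    src = np.asarray(prime_corners_2d, dtype=np.float32).reshape(-1, 2)
    dst = np.asarray(ego_corners_2d, dtype=np.float32).reshape(-1, 2)
    H, _ = cv2.findHomography(src, dst)
    return H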
Example 38
 def getImage (self):
     #errorCode, resolution, image=vrep.simxGetVisionSensorImage(self.clientID, self.camera, 0, vrep.simx_opmode_streaming)
     #print "pillando imagen"
     #time.sleep(1) #necesario        
     errorCode, resolution, image=vrep.simxGetVisionSensorImage(self.clientID, self.camera, 0, vrep.simx_opmode_oneshot_wait)        
     im=np.array(image, dtype=np.uint8)
     im.resize([resolution[1], resolution[0],3])
     im=np.flipud(im)
     return im
Example 39
    def moveAndReadLights(self):
        "rotate in place and print light readings"
        eCode, res, rightEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.rightEye, 0, vrep.simx_opmode_streaming)
        ecode, res, leftEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.leftEye, 0, vrep.simx_opmode_streaming)
        vrep.simxSynchronousTrigger(self.simulID)

        for step in xrange(self.noSteps):
            rightSpeed = 25
            leftSpeed = rightSpeed
            eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.rightMotor, rightSpeed, vrep.simx_opmode_oneshot_wait)
            eCode = vrep.simxSetJointTargetVelocity(self.simulID, self.leftMotor, leftSpeed, vrep.simx_opmode_oneshot_wait)
            vrep.simxSynchronousTrigger(self.simulID)
            eCodeR, res, rightEyeRead = vrep.simxGetVisionSensorImage(self.simulID, self.rightEye, 0, vrep.simx_opmode_buffer)
            eCodeL, res, leftEyeRead  = vrep.simxGetVisionSensorImage(self.simulID, self.leftEye,  0, vrep.simx_opmode_buffer)
            vrep.simxSynchronousTrigger(self.simulID)
#             print "Right eCode:\t", eCodeR,
#             print "Left eCode:\t", eCodeL
#             leftImg = np.array(leftEyeRead, np.uint8)
#             rightImg.resize(res[0],res[1],3)
            print "Right:\t%d, %d\tLeft:\t%d, %d"% (len(rightEyeRead),sum(rightEyeRead), len(leftEyeRead),sum(leftEyeRead))
Example 40
	def get_camera_image(self):
		'Get the image from the camera'

		err, resolution, image = vrep.simxGetVisionSensorImage(self.clientID, self.camera, 0, vrep.simx_opmode_buffer)
		img = np.array(image,dtype=np.uint8)
		img.resize([resolution[1],resolution[0],3])
		img = np.fliplr(img)
		temp = np.copy(img[:,:,0])
		img[:,:,0] = np.copy(img[:,:,2])
		img[:,:,2] = temp 
		return img 
Example 41
def fetchKinect(depthSTR,rgbSTR):
        errorCodeKinectRGB,kinectRGB=vrep.simxGetObjectHandle(clientID,rgbSTR,vrep.simx_opmode_oneshot_wait)
        errorCodeKinectDepth,kinectDepth=vrep.simxGetObjectHandle(clientID,depthSTR,vrep.simx_opmode_oneshot_wait)

        
        errorHere,resolution,image=vrep.simxGetVisionSensorImage(clientID,kinectRGB,0,vrep.simx_opmode_oneshot_wait)
        img,imgArr=Raftaar.ProcessImage(image,resolution)        
        rgbArr=np.array(imgArr)

        errorHere,resol,depth=vrep.simxGetVisionSensorDepthBuffer(clientID,kinectDepth,vrep.simx_opmode_oneshot_wait)
        depthArr=np.array(depth)


        return depthArr,rgbArr
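The depth buffer returned by simxGetVisionSensorDepthBuffer is normalised between the sensor's near and far clipping planes (see also the commented-out zNear/zFar scaling in Example 31). A sketch of turning it into a metric depth map, using the resolution returned by that same call; the clipping distances below are placeholders that must match the sensor in the scene:

import numpy as np

Z_NEAR, Z_FAR = 0.01, 3.5  # placeholder clipping planes; use the values set on the sensor

def depth_to_meters(depth_buffer, resolution):
    """Convert V-REP's normalised depth buffer into a metric depth map (rows x cols)."""
    depth = np.asarray(depth_buffer, dtype=np.float32).reshape(resolution[1], resolution[0])
    return Z_NEAR + depth * (Z_FAR - Z_NEAR)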
Example 42
	def get_and_process_image(self):
		"""
			returns the aggregated data of bubbleRob's Vision Sensor
		"""
		err,res,img = vrep.simxGetVisionSensorImage(self.clientID,self.visionSensorHandle,0,vrep.simx_opmode_oneshot_wait)

		colval = numpy.zeros((res[0]*res[1],3))
		i = 0
		for pix in range(res[0]*res[1]):
			for col in range(3):
				if img[i] >= 0:
					colval[pix][col] = img[i]
				else:
					colval[pix][col] = img[i] + 256
				i += 1

		red = 0.0
		red_position = 0.0
		green = 0.0
		green_position = 0.0
		blue = 0.0
		blue_position = 0.0

		i = 0
		for pix in colval:
			i += 1
			position = (i % 32)
			if (pix[0] > 200 and pix[1] < 100 and pix[2] < 100):
				red += 1
				red_position += -1 + position * 0.0625 
			elif (pix[0] < 100 and pix[1] > 200 and pix[2] < 100):
				green += 1
				green_position += -1 + position * 0.0625
			elif (pix[0] < 100 and pix[1] < 100 and pix[2] > 200):
				blue += 1
				blue_position += -1 + position * 0.0625
		
		if (red > 0):
			red_position = red_position / red
		if (green > 0):
			green_position = green_position / green
		if (blue > 0):
			blue_position = blue_position / blue

		input_vars = [red / 256, red_position, green / 256, green_position, blue / 256, blue_position]
		return input_vars
Example 43
	def __init__(self,port_number):

		vrep.simxFinish(-1)
		self.clientID = vrep.simxStart('127.0.0.1', port_number, True, True, 5000, 5)

		# start simulation
		rc = vrep.simxStartSimulation(self.clientID, vrep.simx_opmode_oneshot_wait)
		
		# object handles
		res, self.quad_obj = vrep.simxGetObjectHandle(self.clientID, 'Quadricopter', vrep.simx_opmode_oneshot_wait)
		res, self.camera   = vrep.simxGetObjectHandle(self.clientID, 'Vision_sensor', vrep.simx_opmode_oneshot_wait)
		res, self.target   = vrep.simxGetObjectHandle(self.clientID, 'Quadricopter_target', vrep.simx_opmode_oneshot_wait)
		
		# Initialise data streaming from V-REP
		err, resolution, image  = vrep.simxGetVisionSensorImage(self.clientID, self.camera, 0, vrep.simx_opmode_streaming)
		_,pos                   = vrep.simxGetObjectPosition(self.clientID, self.quad_obj, -1, vrep.simx_opmode_streaming)
		_,target_pos            = vrep.simxGetObjectPosition(self.clientID, self.target, -1, vrep.simx_opmode_streaming)
		
		time.sleep(2)

		# Variables
		_,self.last_pos = vrep.simxGetObjectPosition(self.clientID, self.quad_obj, -1, vrep.simx_opmode_buffer)
Example 44
    def initilize_vrep_api(self):
        # initialize ePuck handles and variables
        _, self.bodyelements=vrep.simxGetObjectHandle(
            self.clientID, 'ePuck_bodyElements', vrep.simx_opmode_oneshot_wait)
        _, self.leftMotor=vrep.simxGetObjectHandle(
            self.clientID, 'ePuck_leftJoint', vrep.simx_opmode_oneshot_wait)
        _, self.rightMotor=vrep.simxGetObjectHandle(
            self.clientID, 'ePuck_rightJoint', vrep.simx_opmode_oneshot_wait)
        _, self.ePuck=vrep.simxGetObjectHandle(
            self.clientID, 'ePuck', vrep.simx_opmode_oneshot_wait)
        _, self.ePuckBase=vrep.simxGetObjectHandle(
            self.clientID, 'ePuck_base', vrep.simx_opmode_oneshot_wait)
        # proxSens = prox_sens_initialize(self.clientID)
        # initialize odom of ePuck
        _, self.xyz = vrep.simxGetObjectPosition(
            self.clientID, self.ePuck, -1, vrep.simx_opmode_streaming)
        _, self.eulerAngles = vrep.simxGetObjectOrientation(
            self.clientID, self.ePuck, -1, vrep.simx_opmode_streaming)

        # initialize overhead cam
        _, self.overheadCam=vrep.simxGetObjectHandle(
            self.clientID, 'Global_Vision', vrep.simx_opmode_oneshot_wait)
        _, self.resolution, self.image = vrep.simxGetVisionSensorImage(
            self.clientID,self.overheadCam,0,vrep.simx_opmode_oneshot_wait)

        # initialize goal handle + odom
        _, self.goalHandle=vrep.simxGetObjectHandle(
            self.clientID, 'Goal_Position', vrep.simx_opmode_oneshot_wait)
        _, self.goalPose = vrep.simxGetObjectPosition(
            self.clientID, self.goalHandle, -1, vrep.simx_opmode_streaming)

        # STOP Motor Velocities.  Not sure if this is necessary
        _ = vrep.simxSetJointTargetVelocity(
            self.clientID,self.leftMotor,0,vrep.simx_opmode_oneshot_wait)
        _ = vrep.simxSetJointTargetVelocity(
            self.clientID,self.rightMotor,0,vrep.simx_opmode_oneshot_wait)
Esempio n. 45
0
        turnToTargetAngle(getTargetAngle(target_x, target_y))
        moveRobot(1,1)
        time.sleep(0.2)
        print getDistanceToTarget(target_x, target_y)
        
    print "punto!"
    moveRobot(0,0)


#moveRobot(-0.5,0.5)

printRobotLocation()
#print getAngleToTarget(5,5)


errorCode, resolution, image=vrep.simxGetVisionSensorImage(clientID, camera, 0, vrep.simx_opmode_streaming)
time.sleep(1)

# read the buffered stream several times so an up-to-date frame is available
errorCode, resolution, image=vrep.simxGetVisionSensorImage(clientID, camera, 0, vrep.simx_opmode_buffer)
errorCode, resolution, image=vrep.simxGetVisionSensorImage(clientID, camera, 0, vrep.simx_opmode_buffer)
errorCode, resolution, image=vrep.simxGetVisionSensorImage(clientID, camera, 0, vrep.simx_opmode_buffer)
errorCode, resolution, image=vrep.simxGetVisionSensorImage(clientID, camera, 0, vrep.simx_opmode_buffer)
errorCode, resolution, image=vrep.simxGetVisionSensorImage(clientID, camera, 0, vrep.simx_opmode_buffer)

print "hola"

im=np.array(image, dtype=np.uint8)
im.resize([resolution[1], resolution[0],3])

#mlp.imshow(im)
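# --- Hedged alternative, not part of the original example ---
# The repeated buffer reads above simply give the image stream time to deliver its
# first frame.  A hypothetical helper that polls the return code instead (vrep and
# time are already imported in this script):
def wait_for_first_frame(client_id, sensor_handle, timeout=5.0):
    vrep.simxGetVisionSensorImage(client_id, sensor_handle, 0, vrep.simx_opmode_streaming)
    deadline = time.time() + timeout
    while time.time() < deadline:
        err, resolution, image = vrep.simxGetVisionSensorImage(
            client_id, sensor_handle, 0, vrep.simx_opmode_buffer)
        if err == vrep.simx_return_ok and len(resolution) > 0:
            return resolution, image
        time.sleep(0.05)
    return None, None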
Esempio n. 46
    def robot_code(self):
        t = time.time()

        self.curr_goal = 0
        while (time.time() - t) < 200:

            # global map
            _,resolution,image = vrep.simxGetVisionSensorImage(self.clientID,self.overheadCam,0,vrep.simx_opmode_oneshot_wait) # Get image
            im = self.global_map_preprocess(resolution, image)

            self.pose = self.robot_pose_get()
            x, y, theta = self.pose
            # theta = self.theta_history.weighted_average(scheme='last', mathfunc=lambda x: np.exp(x))

            # initialize worldNorthTheta for the first time
            if self.worldNorthTheta is None:
                self.worldNorthTheta = self.pose[2]
                _ = [self.theta_history.append(self.pose[2]) for _ in range(self.history_length)]

            self.pose_pixel = np.array(odom2pixelmap(x, y, self.map_side_length, im.shape[0]))
            m, n = self.pose_pixel


            self.paths, self.blurred_paths = self.global_map_process(im)

            # calculate intermediate goals once
            if self.GOALS is None:
                raw_input("")
                # acquire pixel location of goal
                _, finishPose = vrep.simxGetObjectPosition(
                    self.clientID, self.goalHandle, -1, vrep.simx_opmode_buffer)
                self.finish_pixel = odom2pixelmap(finishPose[0], finishPose[1], self.map_side_length, im.shape[0])
                contiguousPath = AStarBinaryGrid(self.blurred_paths).calculate_path(self.pose_pixel, self.finish_pixel)
                self.GOALS = contiguousPath[::self.path_skip]
                # SKIP THIS FIRST LOOP AND CONTINUE
                continue
            else:
                self.goal_pose_pixel = np.array(self.GOALS[self.curr_goal])
                goal_m, goal_n = self.goal_pose_pixel
            lidarValues = self.lidar_scan_get(window_size=21)


            #############################
            # Potential Field Algorithm #
            #############################
            repulsionVectors = self.repulsion_vectors_compute(lidarValues, k_repulse=10.0)
            attractionVector = self.attraction_vector_compute(k_attract=100.0)

            finalVector = np.sum(np.vstack((repulsionVectors, attractionVector)), axis=0)
            finalUnitVector = finalVector / np.linalg.norm(finalVector)
            print "finalUnitVector: ", finalUnitVector

            # TODO: do we use the unit vector (for direction) only, or the finalVector?
            finalValue, finalAngle = cart2pol(finalUnitVector[0], finalUnitVector[1])

            error_theta = angle_diff(mapTheta2worldTheta(finalAngle, self.worldNorthTheta), theta)
            self.e_theta_h.append(error_theta)

            k_angular_p = 2.0 * self.maxVelocity
            k_angular_D = 0.25
            k_angular_S = 1.0
            k_angular_I = 2.5
            omega = k_angular_p * (
                  error_theta
                + k_angular_D / k_angular_S * (self.e_theta_h[-1] - self.e_theta_h[-2])
                + k_angular_S / k_angular_I * sum(self.e_theta_h)
            )
            print "Omega: ", round(omega,1)

            #############################
            # StateController Algorithm #
            #############################
            # if desired heading is not directly in front
            if np.abs(error_theta) > np.pi / 3:
                # turn in place
                forward_vel = self.maxVelocity
                # omega *= 0.25
            else:
                # Direct yourself to the goal
                goal_distance = np.linalg.norm(self.goal_pose_pixel - self.pose_pixel)
                print "distance_from_goal: ", goal_distance
                if goal_distance <= 4:
                    # Achieved Goal!
                    forward_vel = self.maxVelocity * 0.25 # Slow down to prepare for the next one
                    self.curr_goal += 1
                else:
                    forward_vel = self.maxVelocity

            # control the motors
            ctrl_sig_left, ctrl_sig_right = vomega2bytecodes(forward_vel, omega, g=1)
            _ = vrep.simxSetJointTargetVelocity(
                self.clientID,self.leftMotor,ctrl_sig_left,vrep.simx_opmode_oneshot_wait) # set left wheel velocity
            _ = vrep.simxSetJointTargetVelocity(
                self.clientID,self.rightMotor,ctrl_sig_right,vrep.simx_opmode_oneshot_wait) # set right wheel velocity

            self.idash.add(lambda: self.plot_maze(self.blurred_paths*1.0, m, n, goal_m, goal_n))
            self.idash.add(lambda: self.plot_maze(im, m, n, goal_m, goal_n))
            def plot_current_and_desired_heading():
                self.plot_unit_quiver(finalUnitVector, 'r')
                self.plot_unit_quiver(pol2cart(1, worldTheta2mapTheta(theta, self.worldNorthTheta)), 'k')
                plt.title("Error Theta: %f" % error_theta)
            self.idash.add(plot_current_and_desired_heading)
            self.idash.add(self.plot_theta_history)

            self.idash.plotframe()

            if self.killer.kill_now:
                self.clean_exit()
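    # --- Hedged sketch, not part of the original class ---
    # The omega computation in robot_code() above is a discrete PID controller on the
    # heading error: proportional on the latest error, derivative on the last error
    # difference, integral on the accumulated history.  A standalone version of that
    # structure (hypothetical helper, gains named as above):
    def heading_pid(self, error_history, k_p, k_d, k_s, k_i):
        derivative = error_history[-1] - error_history[-2] if len(error_history) > 1 else 0.0
        integral = sum(error_history)
        return k_p * (error_history[-1] + (k_d / k_s) * derivative + (k_s / k_i) * integral)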
Esempio n. 47
Implement the 2D Kalman Filter algorithm in the class kalman2d.kalman_prediction_2d

'''
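# --- Hedged sketch, not part of the original assignment code ---
# One possible shape for the prediction step asked for above, using a constant-velocity
# model: state x = [px, py, vx, vy], covariance P, process noise Q, time step dt.
# The function name and signature are assumptions; they need not match
# kalman2d.kalman_prediction_2d exactly.
import numpy as np

def kalman_predict_2d(x, P, Q, dt):
    # State-transition matrix for constant velocity in the plane
    F = np.array([[1.0, 0.0, dt,  0.0],
                  [0.0, 1.0, 0.0, dt ],
                  [0.0, 0.0, 1.0, 0.0],
                  [0.0, 0.0, 0.0, 1.0]])
    x_pred = F.dot(x)                # predicted state
    P_pred = F.dot(P).dot(F.T) + Q   # predicted covariance
    return x_pred, P_pred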


#Get motor handle
errorCode,left_motor=vrep.simxGetObjectHandle(clientID,'Pioneer_p3dx_leftMotor',vrep.simx_opmode_oneshot_wait)
errorCode,right_motor=vrep.simxGetObjectHandle(clientID,'Pioneer_p3dx_rightMotor',vrep.simx_opmode_oneshot_wait)    

# Get ultrasound sensors handle
errorCode,usensor1=vrep.simxGetObjectHandle(clientID,'Pioneer_p3dx_ultrasonicSensor8',vrep.simx_opmode_oneshot_wait)           

# Get camera sensors handle
errorCode,cam_handle=vrep.simxGetObjectHandle(clientID,'Camera',vrep.simx_opmode_oneshot_wait)           
errorCode,resolution,image=vrep.simxGetVisionSensorImage(clientID,cam_handle,0,vrep.simx_opmode_oneshot_wait)

pX = pY = 0.0
# average 30 GPS samples to estimate the starting offset
for i in range(30):
    errorCode, gpsX = vrep.simxGetFloatSignal(clientID, 'gpsX', vrep.simx_opmode_oneshot_wait)
    errorCode, gpsY = vrep.simxGetFloatSignal(clientID, 'gpsY', vrep.simx_opmode_oneshot_wait)
    pX += gpsX
    pY += gpsY

offSetX = (pX / 30) - initX
offSetY = (pY / 30) - initY
    

for i in range(1,11):
#while True :
    print 'Iteration : ', i
Esempio n. 48
    try:
        postureProxy = ALProxy("ALRobotPosture", robotIP, robotPort)
    except Exception, e:
        print "Could not create proxy to ALRobotPosture"
        print "Error was: ", e

    # INIT V-REP IMAGE SENSOR
    vrep.simxFinish(-1)
    clientID=vrep.simxStart('127.0.0.2',19998,True,True,5000,5)
    if clientID!=-1:
        print('Connected to remote API server')
        #Get and display the pictures from the camera
        #Get the handle of the vision sensor
        res1,visionSensorHandle=vrep.simxGetObjectHandle(clientID,'NAO_vision1',vrep.simx_opmode_oneshot_wait)
        #Get the image
        res2,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_streaming)
        #Allow the display to be refreshed
        #plt.ion()
        #Initialiazation of the figure
        res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
        time.sleep(1)
        im = I.new("RGB", (640,480), "white")
        #Give a title to the figure
        #fig = plt.figure(1)    
        #fig.canvas.set_window_title('NAO_vision1')
        #inverse the picture
        #plotimg = plt.imshow(im,origin='lower')
		
    # Set NAO in Stiffness On
    StiffnessOn(motionProxy)
Esempio n. 49
    vrep.simxPauseCommunication(clientID,False)

    # give the arm some time to move
    time.sleep(1)

    # verify that the arm has approximately reached its target position
    # (note: simx_opmode_streaming returns the last buffered value, so the very first
    #  such call for a joint may not yet contain valid data)
    err0 = vrep.simxGetJointPosition(clientID,armJoint0Handle,vrep.simx_opmode_streaming)[1] - rand_base_rotate
    err1 = vrep.simxGetJointPosition(clientID,armJoint1Handle,vrep.simx_opmode_streaming)[1] - rand_base_lower
    err2 = vrep.simxGetJointPosition(clientID,armJoint2Handle,vrep.simx_opmode_streaming)[1] - rand_elbow_bend
    err3 = vrep.simxGetJointPosition(clientID,armJoint3Handle,vrep.simx_opmode_streaming)[1] - rand_hand_bend
    err4 = vrep.simxGetJointPosition(clientID,armJoint4Handle,vrep.simx_opmode_streaming)[1] - 0.0
    joint_pos_err = abs(err0) + abs(err1) + abs(err2) + abs(err3) + abs(err4)
    print "joint positioning error =", joint_pos_err

    # get image from vision sensor 1
    err,res,img = vrep.simxGetVisionSensorImage(clientID,visionSensor1Handle,0,vrep.simx_opmode_oneshot_wait)
    colval1 = img_to_numpy(res,img)
    # get the binary image marking the cyan ball in Youbot's gripper
    binval1 = img_binarise(colval1,0.0,0.15,0.7,0.99,0.9,1.0)
    # get the x- and y-coordinates of the cyan ball
    binval1 = numpy.reshape(binval1,(res[1],res[0]))
    pos_img1 = img_find_cm(binval1)
    print "pos_img1 =", pos_img1

    # get image from vision sensor 2
    err,res,img = vrep.simxGetVisionSensorImage(clientID,visionSensor2Handle,0,vrep.simx_opmode_oneshot_wait)
    colval2 = img_to_numpy(res,img)
    # get the binary image marking the cyan ball in Youbot's gripper
    binval2 = img_binarise(colval2,0.0,0.15,0.7,0.99,0.9,1.0)
    # get the x- and y-coordinates of the cyan ball
    binval2 = numpy.reshape(binval2,(res[1],res[0]))
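    # --- Hedged sketch, not part of the original code ---
    # img_find_cm is not shown in this excerpt; one plausible implementation returns
    # the centre of mass (mean row and column index) of the binary mask, which is what
    # the comments above describe.  The name below is hypothetical.
    def img_find_cm_sketch(binval):
        rows, cols = numpy.nonzero(binval)
        if rows.size == 0:
            return None
        return (rows.mean(), cols.mean())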
Esempio n. 50
def JointControlWhileTakingPictures(clientID,motionProxy,i,Body, naoDirectory, visionSensorName):
    #Get the handle of the vision sensor
    res1,visionSensorHandle=vrep.simxGetObjectHandle(clientID,visionSensorName,vrep.simx_opmode_oneshot_wait)
    #Get the image
    res2,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_buffer)
    time.sleep(1)
    #Head
    while(vrep.simxGetConnectionId(clientID)!=-1):
        #Getting joint's angles from Choregraphe (please check your robot's IP)
        commandAngles = motionProxy.getAngles('Body', False)
        #Allow the robot to move in VRep using choregraphe's angles
        
        vrep.simxSetJointTargetPosition(clientID,Body[0][i],commandAngles[0],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[1][i],commandAngles[1],vrep.simx_opmode_streaming)
        #Left Leg
        vrep.simxSetJointTargetPosition(clientID,Body[2][i],commandAngles[8],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[3][i],commandAngles[9],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[4][i],commandAngles[10],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[5][i],commandAngles[11],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[6][i],commandAngles[12],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[7][i],commandAngles[13],vrep.simx_opmode_streaming)
        #Right Leg
        vrep.simxSetJointTargetPosition(clientID,Body[8][i],commandAngles[14],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[9][i],commandAngles[15],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[10][i],commandAngles[16],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[11][i],commandAngles[17],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[12][i],commandAngles[18],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[13][i],commandAngles[19],vrep.simx_opmode_streaming)
        #Left Arm
        vrep.simxSetJointTargetPosition(clientID,Body[14][i],commandAngles[2],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[15][i],commandAngles[3],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[16][i],commandAngles[4],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[17][i],commandAngles[5],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[18][i],commandAngles[6],vrep.simx_opmode_streaming)
        #Right Arm
        vrep.simxSetJointTargetPosition(clientID,Body[19][i],commandAngles[20],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[20][i],commandAngles[21],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[21][i],commandAngles[22],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[22][i],commandAngles[23],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[23][i],commandAngles[24],vrep.simx_opmode_streaming)
        #Left Fingers
        vrep.simxSetJointTargetPosition(clientID,Body[25][i][0],1.0-commandAngles[7],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[25][i][1],1.0-commandAngles[7],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[25][i][2],1.0-commandAngles[7],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[25][i][3],1.0-commandAngles[7],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[25][i][4],1.0-commandAngles[7],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[25][i][5],1.0-commandAngles[7],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[25][i][6],1.0-commandAngles[7],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[25][i][7],1.0-commandAngles[7],vrep.simx_opmode_streaming)
        #Right Fingers
        vrep.simxSetJointTargetPosition(clientID,Body[27][i][0],1.0-commandAngles[25],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[27][i][1],1.0-commandAngles[25],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[27][i][2],1.0-commandAngles[25],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[27][i][3],1.0-commandAngles[25],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[27][i][4],1.0-commandAngles[25],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[27][i][5],1.0-commandAngles[25],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[27][i][6],1.0-commandAngles[25],vrep.simx_opmode_streaming)
        vrep.simxSetJointTargetPosition(clientID,Body[27][i][7],1.0-commandAngles[25],vrep.simx_opmode_streaming)
		
        # Get the image of the vision sensor
        res,resolution,image=vrep.simxGetVisionSensorImage(clientID,visionSensorHandle,0,vrep.simx_opmode_oneshot_wait)
        #Transform the image so it can be displayed using pyplot
        image_byte_array = array.array('b',image)
        im = I.frombuffer("RGB", (640,480), image_byte_array, "raw", "RGB", 0, 1)
        # Save the image to disk
        date_string = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
        naoDirectory = 'D:\_dev\mo416-final-project\Images\Semaforo_verde'
        file_path = naoDirectory + '\img_' + date_string + '.png'
        print file_path
        rotated_im = im.transpose(I.ROTATE_180)
        rotated_im.save(file_path,"PNG")	
    print 'End of simulation'
Esempio n. 51
if clientID!=-1:
	print 'Connected to V-REP'

	# get vision sensor object handle 
	res, vs = vrep.simxGetObjectHandle(clientID, 'Vision_sensor', vrep.simx_opmode_oneshot_wait)

	# getting other object handles
	res, quad = vrep.simxGetObjectHandle(clientID, 'Quadricopter', vrep.simx_opmode_oneshot_wait)
	time.sleep(2)
	res, marker = vrep.simxGetObjectHandle(clientID, 'Plane1', vrep.simx_opmode_oneshot_wait)
	time.sleep(2)

	print quad, marker

	# start getting data from vision sensor
	err, resolution, image = vrep.simxGetVisionSensorImage(clientID, vs, 0, vrep.simx_opmode_streaming)
	time.sleep(2)

	
	
	while True:

		err, resolution, image = vrep.simxGetVisionSensorImage(clientID, vs, 0, vrep.simx_opmode_buffer)
		img = np.array(image,dtype=np.uint8)
		img.resize([resolution[1],resolution[0],3])
		img = np.fliplr(img)

		# swap the red and blue channels (RGB -> BGR)
		temp = np.copy(img[:,:,0])
		img[:,:,0] = np.copy(img[:,:,2])
		img[:,:,2] = temp
Esempio n. 52
            time1 = input("Time of flight: ")
            vrep.simxSetObjectFloatParameter(clientID, obj1, vrep.sim_shapefloatparam_init_velocity_z, z_vel, vrep.simx_opmode_blocking)
            vrep.simxSetObjectFloatParameter(clientID, obj1, vrep.sim_shapefloatparam_init_velocity_x, x_vel, vrep.simx_opmode_blocking)
            vrep.simxStartSimulation(clientID, vrep.simx_opmode_blocking)

            t_end = time.time() + time1
            #while time.time() < t_end:
            for i in range(0,10):

                errorCodeKinectRGB,kinectRGB=vrep.simxGetObjectHandle(clientID,'kinect_rgb',vrep.simx_opmode_blocking)
                #errorHere,resolution,image=vrep.simxGetVisionSensorImage(clientID,kinectRGB,0,vrep.simx_opmode_blocking)
                img_time=vrep.simxGetLastCmdTime(clientID)


                #errorCodeKinectDepth,kinectDepth=vrep.simxGetObjectHandle(clientID,depthSTR,vrep.simx_opmode_blocking)
                errorHere,resolution,image=vrep.simxGetVisionSensorImage(clientID,kinectRGB,0,vrep.simx_opmode_oneshot_wait)
                image_byte_array = array.array('b', image)
                image_buffer = I.frombuffer("RGB", (resolution[0],resolution[1]), image_byte_array, "raw", "BGR", 0, 1)
                image_buffer = image_buffer.transpose(I.FLIP_LEFT_RIGHT)
                image_buffer = image_buffer.transpose(I.ROTATE_180)

                img2 = np.asarray(image_buffer)
                img_time=vrep.simxGetLastCmdTime(clientID)
                #print(img_time)

                #cv2.imwrite(str(img_time)+'.png',img2)

                time1=vrep.simxGetLastCmdTime(clientID)

                pose = getPosition(clientID)
                pose_list.append(pose)
    def _read_camera(self):
        data = vrep.simxGetVisionSensorImage(self.clientID, self.camera, 1, vrep.simx_opmode_buffer)
        if data[0] == vrep.simx_return_ok:
            return data
        return None
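    # --- Hedged usage sketch, not part of the original class ---
    # _read_camera() returns the raw (returnCode, resolution, image) tuple of a
    # greyscale read (option flag 1).  Turning it into a 2-D numpy image might look
    # like this (assumes numpy is imported as np; the method name is hypothetical):
    def _camera_as_array(self):
        data = self._read_camera()
        if data is None:
            return None
        _, resolution, image = data
        img = np.array(image, dtype=np.int16)  # signed bytes from the remote API
        img[img < 0] += 256                    # map to 0..255
        return img.reshape(resolution[1], resolution[0]).astype(np.uint8)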