# Example #1
0
# Example script: basic UR5 arm + gripper bring-up with a few taught poses.
from controller.gripper.gripper_control import Gripper_Controller
from controller.ur5.ur_controller import UR_Controller
import time
import numpy as np

# Controller threads for the UR5 arm and the gripper.
urc = UR_Controller()
grc = Gripper_Controller()

urc.start()
grc.start()

# Taught Cartesian poses: [x, y, z, rx, ry, rz] — position plus axis-angle
# rotation; presumably meters/radians per the UR convention (confirm).
# pose0 = np.array([-0.252, -0.138, 0.067, 0.013, -2.121, 2.313])
pose0 = np.array([-0.539, 0.312, 0.321, -1.787, -1.604, -0.691])
pose1 = np.array([-0.481, 0.283, 0.359, -1.6, -1.480, -1.031])
pose2 = np.array([-0.481, 0.283, 0.359, -1.216, -1.480, -.8])


# grc.gripper_helper.set_gripper_current_limit(0.4)


# Command the gripper to position 0 (the controller thread follows this value).
grc.follow_gripper_pos = 0
# c = input()
# grc.follow_gripper_pos = 1

#
#
# a = 0.05
# v = 0.05
# urc.movel_wait(pose0, a=a, v=v)
# #
# c = input()
from perception.kinect.kinect_camera import Kinect
from scipy.spatial.transform import Rotation as R
from controller.gripper.gripper_control_v2 import Gripper_Controller_V2
from scipy.interpolate import griddata
from perception.fabric.util.segmentation import segmentation
from perception.fabric.run import Run
from PIL import Image
import skimage.measure
import torchvision.transforms as T
import os




################################### initialize ur5
# NOTE(review): a UR_Controller was already constructed and started earlier in
# this script; this creates and starts a second controller for the same arm —
# confirm the duplication is intentional.
urc = UR_Controller()
urc.start()

################################### initialize rx150
# NOTE(review): RX150_Driver_Thread is not imported anywhere in this file, so
# this line raises NameError as written — the import appears to be missing.
rx150_thread = RX150_Driver_Thread(port="/dev/ttyACM0", baudrate=1000000)
# Enable servo torque on the mini arm before commanding motion.
rx150_thread.rx150.torque(enable=1)
rx150_thread.start()


################################### initialize gripper
# Start gripper
# grc = Gripper_Controller_V2()
# grc.follow_gripper_pos = 1.2
# grc.follow_dc = [0, 0]
# grc.gripper_helper.set_gripper_current_limit(0.3)
# grc.start()
# Example #3
0
# Kinect camera calibration: 3x3 intrinsic matrices with fx, fy on the
# diagonal and the principal point (cx, cy) in the last column.
cam_intrinsics_origin = np.array([[917.3927285, 0., 957.21294894],
                                  [0., 918.96234057, 555.32910487],
                                  [0., 0., 1.]])
cam_intrinsics = np.array([[965.24853516, 0., 950.50838964],
                           [0., 939.67144775, 554.55567298],
                           [0., 0., 1.]])  # New Camera Intrinsic Matrix
# Lens distortion coefficients — presumably OpenCV ordering (k1, k2, p1, p2,
# k3); confirm against the calibration tool that produced them.
dist = np.array([[0.0990126, -0.10306044, 0.00024658, -0.00268176,
                  0.05763196]])
camera = Kinect(cam_intrinsics_origin, dist)

# Camera extrinsics and depth scale saved by a prior calibration run.
cam_pose = np.loadtxt('real/camera_pose.txt', delimiter=' ')
cam_depth_scale = np.loadtxt('real/camera_depth_scale.txt', delimiter=' ')

# User options (change me)
# --------------- Setup options ---------------
urc = UR_Controller()
urc.start()
# Default acceleration / velocity for UR motion commands.
a = 0.2
v = 0.2

# workspace_limits = np.asarray([[0.3, 0.748], [-0.224, 0.224], [-0.255, -0.1]]) # Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
workspace_limits = np.asarray([
    [-0.845, -0.605], [-0.14, 0.2], [0, 0.2]
])  # Cols: min max, Rows: x y z (define workspace limits in robot coordinates)

# tool_orientation = [2.22,-2.22,0]
# tool_orientation = [0., -np.pi/2, 0.] # [0,-2.22,2.22] # [2.22,2.22,0]
from scipy.spatial.transform import Rotation as R
# tool_orientation_euler = [180, 0, 90]
# tool_orientation_euler = [180, 0, 0]
# Tool orientation as Euler angles in degrees; presumably converted via
# scipy's Rotation elsewhere — confirm the axis convention used.
tool_orientation_euler = [180, 0, 180]
# Example #4
0
import time
import cv2
from controller.ur5.ur_controller import UR_Controller
from controller.mini_robot_arm.RX150_driver import RX150_Driver
from perception.kinect.kinect_camera import Kinect
from scipy.spatial.transform import Rotation as R
from controller.gripper.gripper_control import Gripper_Controller
from scipy.interpolate import griddata
from perception.fabric.util.segmentation import segmentation
from perception.fabric.run import Run
from PIL import Image
import skimage.measure
import torchvision.transforms as T
import os

urc = UR_Controller()
urc.start()
# Default acceleration / velocity for UR motion commands.
a = 0.2
v = 0.2

# RX150 mini robot arm over serial; enable torque, then echo the current
# joint positions as a connectivity check.
rx150 = RX150_Driver(port="/dev/ttyACM0", baudrate=1000000)
rx150.torque(enable=1)
print(rx150.readpos())


def rx_move(g_open, x_pos, end_angle=0, timestamp=100):
    """Command the RX150 mini arm to a pose with a given gripper opening.

    Args:
        g_open: gripper opening value placed in the last slot of the
            joint-value template passed to ``rx150.gogo``.
        x_pos: target x position forwarded to ``rx150.gogo``.
        end_angle: end-effector angle (default 0, matching the original
            hard-coded value).
        timestamp: motion duration parameter for ``rx150.gogo``
            (default 100, matching the original hard-coded value).
    """
    # Joint command template; the final element is the gripper opening.
    values = [1024, 2549, 1110, 1400, 0, g_open]
    # Fixed height used by this routine — presumably millimeters; confirm
    # against the RX150 driver's convention.
    y = 90
    # Bug fix: the original computed y = 90 but passed the literal 90,
    # leaving the local unused; pass the named value instead.
    rx150.gogo(values, x_pos, y, end_angle, 0, timestamp=timestamp)
# Example #5
0
# NOTE(review): the opening line of this array literal (likely
# "cam_intrinsics_origin = np.array([[917..." as in the earlier example)
# appears to have been truncated — the three rows below are orphaned and
# this section is not valid Python as written.
[917.3927285,    0.,         957.21294894],
[  0.,         918.96234057, 555.32910487],
[  0.,           0.,           1.        ]])
# Updated intrinsic matrix after re-calibration.
cam_intrinsics = np.array([
[965.24853516,   0.,         950.50838964],
 [  0.,         939.67144775, 554.55567298],
 [  0.,           0.,           1.        ]]) # New Camera Intrinsic Matrix
# Lens distortion coefficients — presumably OpenCV ordering; confirm.
dist = np.array([[ 0.0990126,  -0.10306044,  0.00024658, -0.00268176,  0.05763196]])
camera = Kinect(cam_intrinsics_origin, dist)
# User options (change me)
# --------------- Setup options ---------------
# tcp_host_ip = '100.127.7.223' # IP and port to robot arm as TCP client (UR5)
# tcp_port = 30002
# rtc_host_ip = '100.127.7.223' # IP and port to robot arm as real-time client (UR5)
# rtc_port = 30003
urc = UR_Controller()
urc.start()
# Default acceleration / velocity for UR motion commands.
a = 0.2
v = 0.2
# pose0 = np.array([-0.511, 0.294, 0.237, -0.032, -1.666, 0.138])
# Home pose: [x, y, z, rx, ry, rz] (axis-angle rotation).
pose0 = np.array([-0.718, 0.15, 0.05, 0.26, -np.pi/2, 0.])

# urc.movel_wait(pose0, a=a, v=v)
# exit()
# Print the current tool pose, each component formatted to 3 decimals.
print(', '.join([str("{:.3f}".format(_)) for _ in urc.getl_rt()]))

# workspace_limits = np.asarray([[0.3, 0.748], [0.05, 0.4], [-0.2, -0.1]]) # Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
# workspace_limits = np.asarray([[-0.7, -0.3], [-0.2, 0.34], [0, 0.2]]) # Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
# workspace_limits = np.asarray([[-0.845, -0.605], [-0.14, 0.2], [0.02, 0.22]]) # Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
workspace_limits = np.asarray([[-0.845, -0.718], [-0.14, 0.15], [0.05, 0.26]]) # Cols: min max, Rows: x y z (define workspace limits in robot coordinates)
# Step size of the calibration grid — presumably meters; confirm units.
calib_grid_step = 0.05
import csv
import random
from perception.wedge.gelsight.util.Vis3D import ClassVis3D

from perception.wedge.gelsight.gelsight_driver import GelSight
from controller.gripper.gripper_control import Gripper_Controller
from controller.ur5.ur_controller import UR_Controller
from controller.mini_robot_arm.RX150_driver import RX150_Driver

import keyboard
from queue import Queue
from logger_class import Logger

import collections

# Controller threads for the UR5 arm and the gripper.
urc = UR_Controller()
grc = Gripper_Controller()

urc.start()
grc.start()

# Start pose: [x, y, z, rx, ry, rz] (axis-angle rotation).
# pose0 = np.array([-0.505-.1693, -0.219, 0.235, -1.129, -1.226, 1.326])
pose0 = np.array([-0.667, -0.196, 0.228, 1.146, -1.237, -1.227])
# Limit the gripper motor current to 0.6 (units per the gripper_helper API).
grc.gripper_helper.set_gripper_current_limit(0.6)

# RX150 mini arm over serial; enable torque, then echo joint positions as a
# connectivity check.
rx150 = RX150_Driver(port="/dev/ttyACM0", baudrate=1000000)
rx150.torque(enable=1)
print(rx150.readpos())

# Angle setting — presumably degrees, used later in the routine; confirm.
ang = 30
# ang = -90
# NOTE(review): as above, the opening "cam_intrinsics_origin = np.array([["
# line of this array literal appears truncated — these rows are orphaned and
# this section is not valid Python as written.
[917.3927285,    0.,         957.21294894],
[  0.,         918.96234057, 555.32910487],
[  0.,           0.,           1.        ]])
# Updated intrinsic matrix after re-calibration.
cam_intrinsics = np.array([
[965.24853516,   0.,         950.50838964],
 [  0.,         939.67144775, 554.55567298],
 [  0.,           0.,           1.        ]]) # New Camera Intrinsic Matrix
# Lens distortion coefficients — presumably OpenCV ordering; confirm.
dist = np.array([[ 0.0990126,  -0.10306044,  0.00024658, -0.00268176,  0.05763196]])
camera = Kinect(cam_intrinsics_origin, dist)

# Camera extrinsics and depth scale saved by a prior calibration run.
cam_pose = np.loadtxt('real/camera_pose.txt', delimiter=' ')
cam_depth_scale = np.loadtxt('real/camera_depth_scale.txt', delimiter=' ')

# User options (change me)
# --------------- Setup options ---------------
# NOTE(review): urc and rx150 are re-created here even though instances were
# already constructed above — confirm the duplication is intentional.
urc = UR_Controller()
urc.start()
# Default acceleration / velocity for UR motion commands.
a = 0.2
v = 0.2

rx150 = RX150_Driver(port="/dev/ttyACM0", baudrate=1000000)
rx150.torque(enable=1)
print(rx150.readpos())

def rx_move(g_open, x_pos, end_angle=0, timestamp=100):
    """Command the RX150 mini arm to a pose with a given gripper opening.

    Args:
        g_open: gripper opening value placed in the last slot of the
            joint-value template passed to ``rx150.gogo``.
        x_pos: target x position forwarded to ``rx150.gogo``.
        end_angle: end-effector angle (default 0).
        timestamp: motion duration parameter for ``rx150.gogo``
            (default 100).
    """
    # Joint command template; the final element is the gripper opening.
    values = [1024, 2549, 1110, 1400, 0, g_open]
    # Fixed height used by this routine — presumably millimeters; confirm
    # against the RX150 driver's convention.
    y = 90
    # Cleanup: dropped the no-op "end_angle = end_angle" and the "x = x_pos"
    # alias from the original; behavior is unchanged.
    rx150.gogo(values, x_pos, y, end_angle, 0, timestamp=timestamp)