import cv2
import numpy as np
from FLSpegtransfer.vision.BallDetectionRGBD import BallDetectionRGBD

# hsv_lower_red, hsv_upper_red, and masking_depth are module-level settings
# (see the configuration values in the full example further below)
def mask_image(img_color, img_point):
    # 2D color masking
    img_hsv = cv2.cvtColor(img_color, cv2.COLOR_BGR2HSV)
    img_masked = cv2.inRange(img_hsv, hsv_lower_red, hsv_upper_red)

    # noise filtering
    kernel = np.ones((2, 2), np.uint8)
    img_masked = cv2.morphologyEx(img_masked, cv2.MORPH_OPEN, kernel)

    # color masking
    con1 = (img_masked == 255)
    arg1 = np.argwhere(con1)
    pnt1 = img_point[con1]

    # remove nan
    con2 = (~np.isnan(pnt1).any(axis=1))
    arg2 = np.argwhere(con2)
    pnt2 = pnt1[con2]

    # transform w.r.t. task coordinate (identity here, so the depth check below
    # is done on the camera-frame z values)
    Tpc = np.eye(4)
    pnt2_tr = BallDetectionRGBD.transform(pnt2, Tpc)

    # depth masking
    con3 = (pnt2_tr[:, 2] > masking_depth[0]) & (pnt2_tr[:, 2] < masking_depth[1])
    arg3 = np.argwhere(con3)

    # create mask where color & depth conditions hold
    arg_mask = np.squeeze(arg1[arg2[arg3]])
    mask = np.zeros_like(img_masked)
    mask[arg_mask[:, 0], arg_mask[:, 1]] = 255
    return mask
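# A minimal usage sketch for mask_image() above, assuming the saved Zivid frames from
# Example #4 below are available; any BGR image with a matching organized point cloud
# (HxWx3, NaN where depth is invalid) works the same way.
color = np.load('ball_img_zivid_color.npy')   # (H, W, 3) uint8, BGR
point = np.load('ball_img_zivid_point.npy')   # (H, W, 3) float, mm; NaN where depth is invalid
mask = mask_image(color, point)               # 255 where the color & depth conditions hold
cv2.imshow("red mask", mask)
cv2.waitKey(0)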
Example #2

import numpy as np
from FLSpegtransfer.vision.ZividCapture import ZividCapture
from FLSpegtransfer.vision.BallDetectionRGBD import BallDetectionRGBD
# (the dvrkController import is not shown in this excerpt)

root = '/home/hwangmh/pycharmprojects/FLSpegtransfer/'

# configurations
use_Trc = True
which_camera = 'inclined'
which_arm = 'PSM1'

# define objects
zivid = ZividCapture(which_camera=which_camera)
zivid.start()
dvrk = dvrkController(arm_name='/' + which_arm, comp_hysteresis=True)
# dvrk = dvrkArm('/'+which_arm)
Trc = np.load(root + 'calibration_files/Trc_' + which_camera + '_' +
              which_arm + '.npy')
Tpc = np.load(root + 'calibration_files/Tpc_' + which_camera + '.npy')
bd = BallDetectionRGBD(Trc=Trc, Tpc=Tpc, which_camera=which_camera)
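
# A minimal sketch of how the loaded calibration transforms are typically applied, reusing
# the transform() helper seen in mask_image(). The point below is an arbitrary example value,
# and the direction of Trc/Tpc depends on the convention used when the calibration was saved.
p_cam = np.array([[0.0, 0.0, 600.0]])             # one camera-frame point (mm)
p_rob = BallDetectionRGBD.transform(p_cam, Trc)   # camera <-> robot (PSM) frame
p_task = BallDetectionRGBD.transform(p_cam, Tpc)  # camera <-> pegboard/task frame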

traj = np.load("traj.npy", allow_pickle=True)
traj_opt = np.load("traj_opt_cubic.npy", allow_pickle=True)

# # run motion (linear)
# for tt in traj:
#     for qs in tt:
#         dvrk.set_joint(qs[0], wait_callback=True)
#         for q in qs:
#             dvrk.set_joint(q, wait_callback=False)
#             time.sleep(0.01)
#         dvrk.set_joint(qs[-1], wait_callback=True)
#
# # run motion (optimized)
# for qs in traj_opt:
Example #3

import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d
import FLSpegtransfer.utils.CmnUtil as U
from FLSpegtransfer.vision.BallDetectionRGBD import BallDetectionRGBD
root = '/home/hwangmh/pycharmprojects/FLSpegtransfer/'
plt.style.use('seaborn-whitegrid')
# plt.style.use('bmh')
plt.rc('font', size=12)  # controls default text sizes
plt.rc('axes', titlesize=20)  # fontsize of the axes title
plt.rc('axes', labelsize=15)  # fontsize of the x and y labels
plt.rc('xtick', labelsize=10)  # fontsize of the tick labels
plt.rc('ytick', labelsize=10)  # fontsize of the tick labels
plt.rc('legend', fontsize=15)  # legend fontsize
plt.rc('figure', titlesize=10)  # fontsize of the figure title

# Trajectory check
BD = BallDetectionRGBD()
file_path = root + 'experiment/0_trajectory_extraction/'
arm_traj = np.load(file_path + 'ready_movement.npy')
wrist_traj = np.load(file_path + 'ready_movement.npy')
# arm_traj[:, 2] -= 0.010
# arm_traj = arm_traj[6255-5618:]
# wrist_traj = wrist_traj[1:]
# print(arm_traj.shape, wrist_traj.shape)

fc = 5
dt = 0.01
arm_traj = U.LPF(arm_traj, fc, dt)
wrist_traj = U.LPF(wrist_traj, fc, dt)
arm_traj = arm_traj[::3]  # downsample by 3 for peg transfer (by 2 for a random trajectory)
wrist_traj = wrist_traj[::3]
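
# For reference, a first-order discrete low-pass filter with cutoff fc [Hz] and sample time
# dt [s] can be sketched as below. This only illustrates the smoothing step above, not
# necessarily the exact implementation behind CmnUtil.LPF.
def lowpass_1st_order(signal, fc, dt):
    signal = np.asarray(signal, dtype=float)
    alpha = dt / (dt + 1.0 / (2.0 * np.pi * fc))  # smoothing factor from the RC time constant
    out = np.empty_like(signal)
    out[0] = signal[0]
    for k in range(1, len(signal)):
        out[k] = alpha * signal[k] + (1.0 - alpha) * out[k - 1]
    return out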
Example #4
import sys
for p in sys.path:
    if p == '/opt/ros/kinetic/lib/python2.7/dist-packages':
        sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
import numpy as np
from FLSpegtransfer.vision.ZividCapture import ZividCapture
from FLSpegtransfer.vision.BallDetectionRGBD import BallDetectionRGBD
from FLSpegtransfer.motion.dvrkKinematics import dvrkKinematics

BD = BallDetectionRGBD()
zivid = ZividCapture()
# zivid.start()

while True:
    color = np.load('ball_img_zivid_color.npy')
    point = np.load('ball_img_zivid_point.npy')
    # point = np.load('fiducial_images/0/point.npy')
    # color, depth, point = zivid.capture_3Dimage(color='BGR')

    # Find balls
    pbr = BD.find_balls(color, point, 'red')
    pbg = BD.find_balls(color, point, 'green')
    pbb = BD.find_balls(color, point, 'blue')
    pby = BD.find_balls(color, point, 'yellow')

    pbr = np.array(pbr)
    d1 = np.linalg.norm(pbr[0][:3] - pbr[1][:3])
    d2 = np.linalg.norm(pbr[0][:3] - pbr[2][:3])
    d3 = np.linalg.norm(pbr[1][:3] - pbr[2][:3])
    print(d1, d2, d3)
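
    # The three pairwise distances above can also be computed in one call; this sketch
    # assumes scipy is available and uses only the xyz columns of the detected red balls.
    from scipy.spatial.distance import pdist
    print(pdist(pbr[:, :3]))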
Example #5
import numpy as np
import matplotlib.pyplot as plt

# `t` (sample index) and `error` (per-sample error in mm) are computed earlier in the
# original script; only the plotting part is shown in this excerpt
plt.figure()
plt.plot(t, error, 'o-', markersize=3)
plt.xlabel("Samples")
plt.ylabel("Error (mm)")
plt.xticks(np.arange(0, 122, step=10))
bins = np.arange(-0.8, 0.8, 0.08)
plt.figure()
plt.xlabel("Error (mm)")
plt.ylabel("Frequency")
plt.hist(error, bins=bins)
plt.show()
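
# A quick numeric summary to go with the plots above; `error` is the same per-sample
# error trace (in mm) that was just plotted.
rms = np.sqrt(np.mean(np.square(error)))
print('RMS error: %.3f mm, max |error|: %.3f mm' % (rms, np.max(np.abs(error))))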

# Evaluate the error effects on joint angle estimation
from FLSpegtransfer.vision.BallDetectionRGBD import BallDetectionRGBD
from FLSpegtransfer.motion.dvrkKinematics import dvrkKinematics
BD = BallDetectionRGBD()


def plot_joint(t, q_des, q_act):
    t = np.array(t)
    q_des = np.array(q_des)
    q_act = np.array(q_act)

    # Create plot
    # plt.title('joint angle')
    ax = plt.subplot(611)
    plt.plot(t, q_des[:, 0] * 180. / np.pi, 'b-')
    plt.plot(t, q_act[:, 0] * 180. / np.pi, 'r-')
    plt.legend(loc='upper center', bbox_to_anchor=(0.9, 2))
    # plt.legend(['q_des', 'q_grey', 'q_red'], ncol=3, bbox_to_anchor=(0.5, 1), loc="lower center")
    # plt.ylim([35, 62])


def find_balls(img_color, img_point, nb_balls, visualize=False):
    # mask color & depth
    masked = mask_image(img_color, img_point)

    if visualize:
        cv2.imshow("", masked)
        cv2.waitKey(0)

    # Find contours
    cnts, _ = cv2.findContours(masked, cv2.RETR_EXTERNAL,
                               cv2.CHAIN_APPROX_SIMPLE)
    cnts = sorted(cnts, key=cv2.contourArea, reverse=True)

    # fit spheres by contour points
    max_cnts = []
    best_models = []
    max_ratios = []
    for c in cnts:
        if len(c) < 50:
            continue
        else:
            color = np.copy(img_color)
            cv2.drawContours(color, [c], -1, (0, 255, 255), 1)

            if visualize:
                cv2.imshow("", color)
                cv2.waitKey(0)

            # Find ball in 2D image
            args = np.squeeze(c)
            x = args[:, 0]
            y = args[:, 1]
            best_model, max_cnt, max_ratio = ransac.fit(x, y, 100, threshold=2)
            best_models.append(best_model)
            max_cnts.append(max_cnt)
            max_ratios.append(max_ratio)

    # sort by best matched counts
    arg = np.argsort(max_ratios)[::-1]
    best_models = np.array(best_models)[arg][:nb_balls]

    # fit spheres by 3D points
    pb = []
    for model in best_models:
        xi, yi, ri = model
        infilled = np.zeros(np.shape(img_color), np.uint8)
        cv2.circle(infilled, (int(xi), int(yi)), int(ri), (0, 255, 255), -1)
        infilled = cv2.cvtColor(infilled, cv2.COLOR_BGR2GRAY)
        ball_masked = cv2.bitwise_and(masked, masked, mask=infilled)

        if visualize:
            cv2.imshow("", ball_masked)
            cv2.waitKey(0)

        # Get the point sets
        args = np.argwhere(ball_masked == 255)
        points_ball = img_point[args[:, 0], args[:, 1]]

        # Linear regression to fit the circle into the point cloud
        xc, yc, zc, rc = BallDetectionRGBD.fit_circle_3d(
            points_ball[:, 0], points_ball[:, 1], points_ball[:, 2])
        pb.append([xc, yc, zc, rc])

    # sort by x-position
    pb = np.array(pb)
    arg = np.argsort(pb[:, 0])
    return pb[arg]
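
# A minimal sketch of calling find_balls() above, assuming the same saved Zivid frames as in
# Example #4 and the module-level configuration shown below; nb_balls=3 matches the three red
# balls measured there.
color = np.load('ball_img_zivid_color.npy')
point = np.load('ball_img_zivid_point.npy')
pb = find_balls(color, point, nb_balls=3, visualize=True)
print(pb)  # one [x, y, z, r] row per detected ball, sorted by x-position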
import sys
for p in sys.path:
    if p == '/opt/ros/kinetic/lib/python2.7/dist-packages':
        sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
import cv2
import numpy as np
from FLSpegtransfer.vision.ZividCapture import ZividCapture
from FLSpegtransfer.vision.BallDetectionRGBD import BallDetectionRGBD
from FLSpegtransfer.utils.RANSAC.RANSAC_circle import RANSAC_circle
from sklearn.metrics import mean_squared_error

ball = BallDetectionRGBD()
ransac = RANSAC_circle()
zivid = ZividCapture()
# zivid.start()

root = '/home/hwangmh/pycharmprojects/FLSpegtransfer/'
Tpc = np.load(root + 'calibration_files/Tpc.npy')
hsv_lower_red = np.array([0 - 20, 130, 40])
hsv_upper_red = np.array([0 + 20, 255, 255])
# masking_depth = [-150, 0]
masking_depth = [500, 730]


def mask_image(img_color, img_point):
    # 2D color masking
    img_hsv = cv2.cvtColor(img_color, cv2.COLOR_BGR2HSV)
    img_masked = cv2.inRange(img_hsv, hsv_lower_red, hsv_upper_red)

    # noise filtering
    kernel = np.ones((2, 2), np.uint8)