class Gui(QtGui.QMainWindow):
    """
    Main GUI Class
    Contains the main function and interfaces between the GUI and functions.
    """

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        """ Main Variables Using Other Classes """
        self.rex = Rexarm()
        self.video = Video()
        self.statemachine = Statemachine()

        """ Other Variables """
        self.last_click = np.float32([0, 0])

        """ Customized events """
        self.flag_pickAndPlace = 0
        self.n_pickLocation = 2
        self.flag_event = 0

        """ Set GUI to track mouse """
        QtGui.QWidget.setMouseTracking(self, True)

        """
        GUI timer
        Creates a timer and calls update_gui() according to the given
        time delay (27 ms).
        """
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self.update_gui)
        self._timer.start(27)

        """
        Event timer
        Creates a timer and calls updateEvent() according to the given
        time delay (???ms).
        """
        # Thread(target=self.event_one).start()
        # self._timer3 = QtCore.QTimer(self)
        # self._timer3.timeout.connect(self.event_one)
        # self._timer3.start(50)

        """
        LCM Arm Feedback timer
        Creates a timer to call the LCM handler for Rexarm feedback.
        """
        self._timer2 = QtCore.QTimer(self)
        self._timer2.timeout.connect(self.rex.get_feedback)
        self._timer2.start()

        """
        Connect Sliders to Function
        TODO: CONNECT THE OTHER 5 SLIDERS IMPLEMENTED IN THE GUI
        """
        self.ui.sldrBase.valueChanged.connect(self.sliderChange)
        self.ui.sldrShoulder.valueChanged.connect(self.sliderChange)
        self.ui.sldrElbow.valueChanged.connect(self.sliderChange)
        self.ui.sldrWrist.valueChanged.connect(self.sliderChange)
        self.ui.sldrGrip1.valueChanged.connect(self.sliderChange)
        self.ui.sldrGrip2.valueChanged.connect(self.sliderChange)

        """ Initial command of the arm to home position """
        self.sliderChange()

        """
        Connect Buttons to Functions
        TODO: NAME AND CONNECT BUTTONS AS NEEDED
        """
        self.ui.btnUser1.setText("Reset Location")
        self.ui.btnUser1.clicked.connect(self.reset)
        self.ui.btnUser2.setText("Play Event 1")
        self.ui.btnUser2.clicked.connect(self.event_one)
        self.ui.btnUser3.setText("Play Event 2")
        self.ui.btnUser3.clicked.connect(self.event_two)
        self.ui.btnUser4.setText("Play Event 3")
        self.ui.btnUser4.clicked.connect(self.event_three)
        self.ui.btnUser5.setText("Play Event 4")
        self.ui.btnUser5.clicked.connect(self.event_four)
        self.ui.btnUser6.setText("Play Event 5")
        self.ui.btnUser6.clicked.connect(self.event_five)
        # self.ui.btnUser6.setText("Test Arm Movement")
        # self.ui.btnUser6.clicked.connect(self.testArm)
        self.ui.btnUser7.setText("Click to grab and place")
        self.ui.btnUser7.clicked.connect(self.GrabAndPlace)
        # self.ui.btnUser8.setText("Teach and Repeat")
        # self.ui.btnUser8.clicked.connect(self.teach)
        # self.ui.btnUser9.setText("Record Arm Location")
        # self.ui.btnUser9.clicked.connect(self.record)
        # self.ui.btnUser10.setText("Play Arm Locations")
        # self.ui.btnUser10.clicked.connect(self.playback)
        self.ui.btnUser10.setText("Calibrate Camera")
        self.ui.btnUser10.clicked.connect(self.simple_cal)
        self.ui.btnUser11.setText("Configure Servos")
        self.ui.btnUser11.clicked.connect(self.rex.cfg_publish_default)
        self.ui.btnUser12.setText("Emergency Stop")
        self.ui.btnUser12.clicked.connect(self.estop)
        # self.ui.btnUser11.setText("Reach DW")
        # self.ui.btnUser11.clicked.connect(self.reach_down)

    def reset(self):
        self.statemachine.reset(self.rex)

    def event_one(self):
        eventMode = 1
        print('Start playing event 1:')
        self.statemachine.runStateMachine(self.video, self.rex, eventMode)

    def event_two(self):
        eventMode = 2
        print('Start playing event 2:')
        self.statemachine.runStateMachine(self.video, self.rex, eventMode)

    def event_three(self):
        eventMode = 3
        print('Start playing event 3:')
        self.statemachine.runStateMachine(self.video, self.rex, eventMode)

    def event_four(self):
        eventMode = 4
        print('Start playing event 4:')
        self.statemachine.runStateMachine(self.video, self.rex, eventMode)

    def event_five(self):
        eventMode = 5
        print('Start playing event 5:')
        self.statemachine.runStateMachine(self.video, self.rex, eventMode)

    def GrabAndPlace(self):
        eventMode = 6
        self.video.click2grab = 1
        self.video.mouse_click_id = 0
        # self.video.wld_coord = np.zeros((2,3))
        if self.video.click2grab == 2:
            print('Pick and place the following: ' + str(self.video.wld_coord))
            # self.statemachine.runStateMachine(self.video, self.rex, eventMode)

    def testArm(self):
        self.statemachine.testArmMove(self.ui, self.rex)

    def estop(self):
        self.statemachine.estop(self.rex)

    def update_gui(self):
        """
        update_gui function
        Continuously called by timer1.
        """
        """
        Renders the video frames.
        Displays a dummy image if no video is available.
        HINT: you can use the radio buttons to select different video
        sources like is done below.
        """
        if (self.video.kinectConnected == 1):
            try:
                self.video.captureVideoFrame()
                self.video.captureDepthFrame()
            except TypeError:
                self.video.kinectConnected = 0
                self.video.loadVideoFrame()
                self.video.loadDepthFrame()
        if (self.ui.radioVideo.isChecked()):
            # self.ui.videoFrame
            self.ui.videoFrame.setPixmap(self.video.convertFrame())
        if (self.ui.radioDepth.isChecked()):
            self.ui.videoFrame.setPixmap(self.video.convertDepthFrame())

        """ Update GUI Joint Coordinates Labels """
        self.ui.rdoutBaseJC.setText(
            str("%.2f" % (self.rex.joint_angles_fb[0] * R2D)))
        self.ui.rdoutShoulderJC.setText(
            str("%.2f" % (self.rex.joint_angles_fb[1] * R2D)))
        self.ui.rdoutElbowJC.setText(
            str("%.2f" % (self.rex.joint_angles_fb[2] * R2D)))
        self.ui.rdoutWristJC.setText(
            str("%.2f" % (self.rex.joint_angles_fb[3] * R2D)))
        self.ui.rdoutGrip1.setText(
            str("%.2f" % (self.rex.joint_angles_fb[4] * R2D)))
        self.ui.rdoutGrip2.setText(
            str("%.2f" % (self.rex.joint_angles_fb[5] * R2D)))

        """
        Mouse position presentation in GUI
        TODO: after implementing workspace calibration, display the world
        coordinates the mouse points to in the RGB video image.
        """
        x = QtGui.QWidget.mapFromGlobal(self, QtGui.QCursor.pos()).x()
        y = QtGui.QWidget.mapFromGlobal(self, QtGui.QCursor.pos()).y()
        if ((x < MIN_X) or (x > MAX_X) or (y < MIN_Y) or (y > MAX_Y)):
            self.ui.rdoutMousePixels.setText("(-,-,-)")
            self.ui.rdoutMouseWorld.setText("(-,-,-)")
        else:
            x = x - MIN_X
            y = y - MIN_Y
            d = 0
            """ RGB / depth sensor registration """
            if (self.ui.radioVideo.isChecked()):
                x_depth, y_depth = np.dot(rgb2depthAff, np.array([x, y, 1]).T)
                if y_depth >= 480 or x_depth >= 600 or y_depth <= 0 or x_depth <= 0:
                    self.ui.rdoutMousePixels.setText("(-,-,-)")
                else:
                    d = self.video.currentDepthFrame[int(y_depth)][int(x_depth)]
            else:
                if y >= 480 or x >= 600 or y <= 0 or x <= 0:
                    self.ui.rdoutMousePixels.setText("(-,-,-)")
                else:
                    d = self.video.currentDepthFrame[int(y)][int(x)]

            """ Convert image coordinates into world coordinates """
            x_wld, y_wld, z_wld = 0, 0, 0
            x_wld, y_wld, z_wld = img2wld(x, y, d)
            self.ui.rdoutMousePixels.setText("(%.0f,%.0f,%.0f)" % (x, y, d))
            self.ui.rdoutMouseWorld.setText("(%.0f,%.0f,%.0f)" %
                                            (x_wld, y_wld, z_wld))
        """
        Updates status label when rexarm playback is being executed.
        This can be extended to include other appropriate messages.
        """
        # if(self.rex.plan_status == 1):
        #     self.playback(maxMove)
        #     self.ui.rdoutStatus.setText("Playing Back - Waypoint %d"
        #                                 % (self.rex.wpt_number))

    def playEvent(self):
        """ Run state machine """
        if self.flag_event != 0:
            # True means the assigned event is finished, otherwise False
            eventStatus = False
            eventStatus = self.statemachine.runStateMachine(
                self.video, self.rex, self.flag_event)
            if eventStatus:
                self.flag_event = 0
                print('Event is finished, end of updating Gui')
            else:
                print('Event is not finished, end of updating Gui')

    def sliderChange(self):
        """
        Function to change the slider labels when sliders are moved
        and to command the arm to the given position.
        """
        self.ui.rdoutBase.setText(str(self.ui.sldrBase.value()))
        self.ui.rdoutTorq.setText(str(self.ui.sldrMaxTorque.value()) + "%")
        self.ui.rdoutShoulder.setText(str(self.ui.sldrShoulder.value()))
        self.ui.rdoutElbow.setText(str(self.ui.sldrElbow.value()))
        self.ui.rdoutWrist.setText(str(self.ui.sldrWrist.value()))
        self.ui.rdoutGrip1.setText(str(self.ui.sldrGrip1.value()))
        self.ui.rdoutGrip2.setText(str(self.ui.sldrGrip2.value()))
        self.rex.torque_multiplier = self.ui.sldrMaxTorque.value() / 100.0
        self.rex.speed_multiplier = self.ui.sldrSpeed.value() / 100.0
        self.rex.joint_angles[0] = self.ui.sldrBase.value() * D2R
        self.rex.joint_angles[1] = self.ui.sldrShoulder.value() * D2R
        self.rex.joint_angles[2] = self.ui.sldrElbow.value() * D2R
        self.rex.joint_angles[3] = self.ui.sldrWrist.value() * D2R
        self.rex.joint_angles[4] = self.ui.sldrGrip1.value() * D2R
        self.rex.joint_angles[5] = self.ui.sldrGrip2.value() * D2R
        print('Joint angles from UI:')
        print(self.rex.joint_angles)
        self.rex.cmd_publish()
        print('Rexarm angle command published')
        # self.forward_kinematic()

    def mousePressEvent(self, QMouseEvent):
        """ Function used to record mouse click positions for calibration """

        """ Get mouse position """
        x = QMouseEvent.x()
        y = QMouseEvent.y()

        """ If the mouse position is not over the camera image, ignore it """
        if ((x < MIN_X) or (x > MAX_X) or (y < MIN_Y) or (y > MAX_Y)):
            return

        """ Change coordinates to image axis """
        self.last_click[0] = x - MIN_X
        self.last_click[1] = y - MIN_Y

        """ Transform coordinates if camera calibration has not been called """
        if (self.video.cal_flag == 0):
            if (self.ui.radioVideo.isChecked()):
                # Obtain x, y, z in the RGB and depth sensors
                x = x - MIN_X
                y = y - MIN_Y
                x_depth, y_depth = np.dot(rgb2depthAff, np.array([x, y, 1]).T)
                if y_depth >= 480 or x_depth >= 600 or y_depth <= 0 or x_depth <= 0:
                    return
                z = self.video.currentDepthFrame[int(y_depth)][int(x_depth)]
                # Convert the coordinates into the world frame
                imgHom2Cam = cvtD2Z(z) * invIntrinsicMat
                camCoord = np.dot(imgHom2Cam, np.float32([x, y, 1]))
                camHomCoord = np.append(camCoord, [1])
                rw_v2 = np.dot(invExtrinsicMat, camHomCoord)
                x_wld, y_wld, z_wld = rw_v2[0] / rw_v2[3], rw_v2[1] / rw_v2[3], rw_v2[2] / rw_v2[3]
                print('The mouse clicked world frame is: ')
                print(x_wld, y_wld, z_wld)

                """ Get the RGB value at the cursor location """
                # rgb_frame = cv2.cvtColor(self.video.currentVideoFrame, cv2.COLOR_RGB2BGR)
                # print(rgb_frame[y-MIN_Y][x-MIN_X])
                print('rgb: ')
                # x and y were already shifted to image coordinates above
                print(self.video.currentVideoFrame[y][x])  # RGB value at the click location
                print(self.video.currentDepthFrame[y][x])  # depth d at the click location

                """ Return clicked coords """
                # self.video.mouse_click_id = 0
                if (self.video.click2grab == 1):
                    self.video.wld_coord[self.video.mouse_click_id] = x_wld, y_wld, z_wld
                    self.video.mouse_click_id += 1
                    if (self.video.mouse_click_id == 2):
                        print('flag' + str(self.video.click2grab))
                        self.video.click2grab = 2
                        print('flag changed to ' + str(self.video.click2grab))
                        self.statemachine.runStateMachine(self.video, self.rex, 6)

        """ If calibration is called to perform """
        # 0 - not performing calibration, 1 - performing calibration
        if (self.video.cal_flag == 1):
            """ Save last mouse coordinate """
            self.video.mouse_coord[self.video.mouse_click_id] = [(x - MIN_X),
                                                                 (y - MIN_Y)]

            """ Update the number of used points for calibration """
            self.video.mouse_click_id += 1

            """ Update status label text """
            self.ui.rdoutStatus.setText("Affine Calibration: Click Point %d"
                                        % (self.video.mouse_click_id + 1))

            """
            If the number of clicks is equal to the expected number of points,
            compute the affine calibration.
            TODO: LAB TASK: Change this code to use your workspace calibration
            routine and NOT the simple calibration function as is done now.
            """
            x = x - MIN_X
            y = y - MIN_Y
            if (self.ui.radioVideo.isChecked()):
                x, y = np.dot(rgb2depthAff, np.array([x, y, 1]).T)
            d = self.video.currentDepthFrame[int(y)][int(x)]
            depth = cvtD2Z(d)
            image_coord = np.append(
                self.video.mouse_coord[self.video.mouse_click_id - 1], [1])
            # projection_mat = np.array([[1,0,0,0],[0,1,0,0],[0,0,1,0]])
            camera_c = depth * np.dot(invIntrinsicMat, image_coord)
            print('Current camera coord:')
            print(camera_c)
            self.video.camera_coord[self.video.mouse_click_id - 1] = camera_c
            print('Current #' + str(self.video.mouse_click_id))

            if (self.video.mouse_click_id == self.video.cal_points):
                """ Update status of calibration flag and number of mouse clicks """
                self.video.mouse_click_id = 0
                wld2CamAffine = calAffine3D(self.video.real_coord,
                                            self.video.camera_coord)
                print(wld2CamAffine)
                wld2CamPadding = np.zeros((1, 4), dtype=float)
                wld2CamPadding[0, 3] = 1.
                self.video.extrinsicMat = np.concatenate(
                    (wld2CamAffine, wld2CamPadding), axis=0)
                print('Before thresholded extrinsic matrix:')
                print(self.video.extrinsicMat)
                threshold = 1e-7
                self.video.invExtrinsicMat = np.linalg.pinv(
                    self.video.extrinsicMat)
                img2CamPadding = np.zeros((1, 3), dtype=float)
                img2CamPadding[0, 2] = 1.
                self.video.imgHom2CamHom = np.concatenate(
                    (depth * invIntrinsicMat, img2CamPadding), axis=0)
                self.video.CamHom2WldHom = np.linalg.pinv(
                    self.video.extrinsicMat)
                self.video.aff_matrix = np.dot(self.video.CamHom2WldHom,
                                               self.video.imgHom2CamHom)
                print('Image coord is ')
                # self.video.aff_matrix = np.dot(extrinsicMat, np.dot(projection_mat.T, intrinsicMat))
                # self.video.aff_matrix = cv2.getAffineTransform(
                #     self.video.mouse_coord,
                #     self.video.real_coord)

                """ Update status label to inform that calibration is done """
                self.ui.rdoutStatus.setText("Waiting for input")
                print('**' * 10)
                # print(self.video.aff_matrix)
                print('Extrinsic matrix:')
                print(self.video.extrinsicMat)
                print('Inverse of Extrinsic matrix:')
                print(np.linalg.pinv(self.video.extrinsicMat))
                print('Intrinsic matrix:')
                print(intrinsicMat)
                print('Inverse Intrinsic matrix:')
                print(invIntrinsicMat)
                print('camera_c:')
                print(self.video.camera_coord)
                self.video.cal_flag = 0

        if (self.flag_pickAndPlace == 1):
            # if(self.video.mouse_click_id == self.video.cal_points):
            pos = np.array([x_wld, y_wld, z_wld])
            self.executePickAndPlace(pos)

    def simple_cal(self):
        """
        Function called when the affine calibration button is pressed.
        Note it only changes the flag to record the next mouse clicks and
        updates the status text label.
        """
        self.video.cal_flag = 1
        self.ui.rdoutStatus.setText("Affine Calibration: Click Point %d"
                                    % (self.video.mouse_click_id + 1))

    """ Retired code, for testing kinematics """
    # def inverse_kinematic_elbowup(self, target):
    #     # using the x, y, z, phi from self.getCurLoc()
    #     # to calculate theta0-3 and then go to the targeted location
    #     # target = self.forward_kinematic()
    #     angles = self.rex.rexarm_IK(target, 1)  # 1 - elbow up
    #     if angles is None:
    #         print('ERROR: Can not reach the given location!')
    #     else:
    #         print(np.asarray(angles) * R2D)
    #     return angles

    # def inverse_kinematic_elbowdown(self, target):
    #     # target = self.forward_kinematic()
    #     angles = self.rex.rexarm_IK(target, 0)  # 0 - elbow down
    #     if angles is None:
    #         print('ERROR: Can not reach the given location!')
    #     else:
    #         print(np.asarray(angles) * R2D)
    #     return angles

    # def reach_up(self, target):
    #     angles = self.inverse_kinematic_elbowup(target)
    #     print(angles)
    #     self.rex.joint_angles[0], self.rex.joint_angles[1], \
    #         self.rex.joint_angles[2], self.rex.joint_angles[3] = angles
    #     # self.rex.cmd_publish()

    # def reach_down(self, target):
    #     angles = self.inverse_kinematic_elbowdown(target)
    #     self.rex.joint_angles[0], self.rex.joint_angles[1], \
    #         self.rex.joint_angles[2], self.rex.joint_angles[3] = angles
    #     self.rex.cmd_publish()

    # def forward_kinematic(self):
    #     self.rex.torque_multiplier = 0
    #     self.rex.cmd_publish()
    #     # all angles are in radians
    #     theta1 = self.rex.joint_angles_fb[1]  # shoulder
    #     theta2 = self.rex.joint_angles_fb[2]  # elbow
    #     theta3 = self.rex.joint_angles_fb[3]  # wrist
    #     # TODO: check link with i
    #     a1 = 99  # link length
    #     a2 = 99
    #     a3 = 140
    #     baseTheta = self.rex.joint_angles_fb[0]  # base angle
    #     dh_table = (theta1, theta2, theta3, a1, a2, a3, baseTheta)
    #     x, y, z, phi = self.rex.rexarm_FK(dh_table, None)
    #     self.ui.rdoutX.setText(str("%.2f mm" % x))
    #     self.ui.rdoutY.setText(str("%.2f mm" % y))
    #     self.ui.rdoutZ.setText(str("%.2f mm" % z))
    #     self.ui.rdoutT.setText(str("%.2f deg" % (phi * R2D)))
    #     return (x, y, z, phi)
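
# Reference sketch (assumption, not part of the original file): the class above
# relies on module-level helpers such as cvtD2Z() and img2wld(), plus the
# calibration matrices intrinsicMat / invIntrinsicMat / invExtrinsicMat and the
# RGB-to-depth affine rgb2depthAff, which are defined elsewhere in the project.
# Based on how mousePressEvent back-projects a click, img2wld is expected to
# behave roughly like the helper below (hypothetical name _img2wld_reference so
# it does not shadow the project's real implementation):
def _img2wld_reference(x, y, d):
    Z = cvtD2Z(d)                                              # raw Kinect depth reading -> metric depth
    cam = Z * np.dot(invIntrinsicMat, np.float32([x, y, 1]))   # pixel -> camera-frame point
    cam_h = np.append(cam, [1.0])                              # homogeneous camera coordinates
    wld_h = np.dot(invExtrinsicMat, cam_h)                     # camera frame -> world frame
    return wld_h[0] / wld_h[3], wld_h[1] / wld_h[3], wld_h[2] / wld_h[3]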
class Gui(QtGui.QMainWindow):
    """
    Main GUI Class
    Contains the main function and interfaces between the GUI and functions.
    """

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        """ Main Variables Using Other Classes """
        self.rex = Rexarm()
        self.video = Video()
        self.statemachine = Statemachine()
        # self.video.extrinsic = np.array(self.video.getcaldata())
        # self.video.inv_extrinsic = np.array(np.linalg.inv(self.video.extrinsic))

        """ Other Variables """
        self.last_click = np.float32([0, 0])

        """ Set GUI to track mouse """
        QtGui.QWidget.setMouseTracking(self, True)
        self.statemachine.setme(self.ui, self.rex)

        """
        GUI timer
        Creates a timer and calls update_gui() according to the given
        time delay (27 ms).
        """
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self.update_gui)
        self._timer.start(27)

        """
        LCM Arm Feedback timer
        Creates a timer to call LCM handler for Rexarm feedback.
        """
        self._timer2 = QtCore.QTimer(self)
        self._timer2.timeout.connect(self.rex.get_feedback)
        self._timer2.start()

        """ Giri - Statemachine timer """
        # self._timer3 = QtCore.QTimer(self)
        # self._timer3.timeout.connect(self.statemachine.timerCallback)
        # self._timer3.start(100)  # frequency of the timer is set by this. it is 100 ms now.
        #                          # However, time.now can give microseconds precision, not just millisecond

        """
        Connect Sliders to Function
        TODO: CONNECT THE OTHER 5 SLIDERS IMPLEMENTED IN THE GUI
        """
        self.ui.sldrBase.valueChanged.connect(self.sliderBaseChange)
        self.ui.sldrShoulder.valueChanged.connect(self.sliderShoulderChange)  # Giri
        self.ui.sldrElbow.valueChanged.connect(self.sliderElbowChange)  # Giri
        self.ui.sldrWrist.valueChanged.connect(self.sliderWristChange)  # Giri
        self.ui.sldrGrip1.valueChanged.connect(self.sliderGrip1Change)  # Giri
        self.ui.sldrGrip2.valueChanged.connect(self.sliderGrip2Change)  # Giri
        self.ui.sldrMaxTorque.valueChanged.connect(self.sliderOtherChange)  # Giri
        self.ui.sldrSpeed.valueChanged.connect(self.sliderOtherChange)  # Giri
        if (self.rex.num_joints == 6):
            self.ui.sldrGrip1.setEnabled(True)  # Giri
            self.ui.sldrGrip2.setEnabled(True)  # Giri
        else:
            self.ui.sldrGrip1.setEnabled(False)  # Giri
            self.ui.sldrGrip2.setEnabled(False)  # Giri

        """ Initial command of the arm to home position """
        self.sliderChange()
        self.reset()

        """
        Connect Buttons to Functions
        TODO: NAME AND CONNECT BUTTONS AS NEEDED
        """
        self.ui.btnUser1.setText("Configure Servos")
        self.ui.btnUser1.clicked.connect(self.btn1)  # Giri
        self.ui.btnUser2.setText("Depth and RGB Calibration")
        self.ui.btnUser2.clicked.connect(self.btn2)  # Giri
        self.ui.btnUser2.setEnabled(False)
        self.ui.btnUser3.setText("Extrinsic Calibration")  # Giri
        self.ui.btnUser3.clicked.connect(self.btn3)
        self.ui.btnUser4.setText("Block Detector")  # Giri
        self.ui.btnUser4.clicked.connect(self.btn4)
        self.ui.btnUser5.setText("Repeat")
        self.ui.btnUser5.clicked.connect(self.btn5)  # Giri
        self.ui.btnUser6.setText("Teach")
        self.ui.btnUser6.clicked.connect(self.teach)  # Giri
        self.ui.btnUser7.setText("Smooth Repeat")
        self.ui.btnUser7.clicked.connect(self.srepeat)  # Giri
        self.ui.btnUser7.setEnabled(False)  # Giri
        self.ui.btnUser8.setText("Pick Block")
        self.ui.btnUser8.clicked.connect(self.pick)
        self.ui.btnUser9.setText("Place Block")
        self.ui.btnUser9.clicked.connect(self.place)
        self.ui.btnUser10.setText("Reset")
        self.ui.btnUser10.clicked.connect(self.reset)  # Giri
        self.ui.btnUser11.setText("Enter Competition Mode")
        self.ui.btnUser11.clicked.connect(self.competition)  # Giri
        self.ui.btnUser12.setText("Emergency Stop!")
        self.ui.btnUser12.clicked.connect(self.estop)

    def estop(self):
        self.statemachine.estop(self.rex)

    def trim_angle(self, q, motorno=1):
        # for motors 1, 2, 3
        if (motorno > 2):
            max_angle = 300
        else:
            max_angle = 360
        if (q > max_angle or q < -1 * max_angle):
            new_q = 0
            print "trimmed: " + str(q) + " motor no: " + str(motorno)
        elif (q > 180):  # 181 --> -179
            new_q = q - 360
            print "trimmed: " + str(q) + " motor no: " + str(motorno)
        elif (q < -180):  # -181 --> 179
            new_q = q + 360
            print "trimmed: " + str(q) + " motor no: " + str(motorno)
        else:
            new_q = q
        return new_q

    def shortorientation(self, w_angle):
        if (w_angle > 90):
            sw_angle = w_angle - 90
        elif (w_angle < -90):
            sw_angle = w_angle + 90
        else:
            sw_angle = w_angle
        return sw_angle

    def update_gui(self):
        """
        update_gui function
        Continuously called by timer1.
        """
        """
        Renders the video frames.
        Displays a dummy image if no video is available.
        HINT: you can use the radio buttons to select different video
        sources like is done below.
        """
        color = self.statemachine.timerCallback()
        if (color == "Decluttered"):
            self.declutter()
        elif (color != "none"):
            self.getangles(color)
        if (self.video.kinectConnected == 1):
            try:
                self.video.captureVideoFrame()
                self.video.captureDepthFrame()
            except TypeError:
                self.video.kinectConnected = 0
                self.video.loadVideoFrame()
                self.video.loadDepthFrame()
        if (self.ui.radioVideo.isChecked()):
            self.ui.videoFrame.setPixmap(self.video.convertFrame())
        if (self.ui.radioDepth.isChecked()):
            self.ui.videoFrame.setPixmap(self.video.convertDepthFrame())

        """ Update GUI Joint Coordinates Labels """
        self.ui.rdoutBaseJC.setText(
            str("%.2f" % (self.rex.joint_angles_fb[0] * R2D)))
        self.ui.rdoutShoulderJC.setText(
            str("%.2f" % (self.rex.joint_angles_fb[1] * R2D)))
        self.ui.rdoutElbowJC.setText(
            str("%.2f" % (self.rex.joint_angles_fb[2] * R2D)))
        self.ui.rdoutWristJC.setText(
            str("%.2f" % (self.rex.joint_angles_fb[3] * R2D)))
        self.ui.rdoutGrip1.setText(str(self.ui.sldrGrip1.value()))  # Giri
        self.ui.rdoutGrip2.setText(str(self.ui.sldrGrip2.value()))  # Giri

        """ Update End Effector Coordinates - Kevin """
        endCoord = forwardKinematics(self.rex.joint_angles_fb[0] * R2D,
                                     self.rex.joint_angles_fb[1] * R2D,
                                     self.rex.joint_angles_fb[2] * R2D,
                                     self.rex.joint_angles_fb[3] * R2D)
        self.ui.rdoutX.setText(str("%.2f" % (round(endCoord[0], 2))))
        self.ui.rdoutY.setText(str("%.2f" % (round(endCoord[1], 2))))
        self.ui.rdoutZ.setText(str("%.2f" % (round(endCoord[2], 2))))
        self.ui.rdoutT.setText(str("%.2f" % (round(endCoord[3], 2))))

        """
        Mouse position presentation in GUI
        TODO: after implementing workspace calibration, display the world
        coordinates the mouse points to in the RGB video image.
        """
        x = QtGui.QWidget.mapFromGlobal(self, QtGui.QCursor.pos()).x()
        y = QtGui.QWidget.mapFromGlobal(self, QtGui.QCursor.pos()).y()
        if ((x < MIN_X) or (x > MAX_X) or (y < MIN_Y) or (y > MAX_Y)):
            self.ui.rdoutMousePixels.setText("(-,-,-)")
            self.ui.rdoutMouseWorld.setText("(-,-,-)")
        else:
            x = x - MIN_X
            y = y - MIN_Y
            z = self.video.currentDepthFrame[y][x]
            self.ui.rdoutMousePixels.setText("(%.0f,%.0f,%.0f)" % (x, y, z))
            if (self.video.cal_flag == 2):
                M = self.video.aff_matrix
                v = np.float32([x, y, 1])
                rw = np.dot(M, v)
                self.ui.rdoutMouseWorld.setText("(%.0f,%.0f,0)" % (rw[0], rw[1]))
            elif (self.video.extrinsic_cal_flag == 2):  # Josh
                xy_in_rgb = np.float32([[[x, y]]])  # Josh
                xy_in_rgb_homogenous = np.array([x, y, 1.0])
                self.video.rgb2dep_aff_matrix = np.array(
                    [[1.09403672e0, 3.44369111e-3, -1.62867595e0],
                     [-2.41966785e-3, 1.07534829e0, -2.69041712e1],
                     [0., 0., 1.]])  # Josh
                xy_in_dep = np.dot(self.video.rgb2dep_aff_matrix,
                                   xy_in_rgb_homogenous)  # Josh
                x_dep = int(xy_in_dep[0])  # Josh
                y_dep = int(xy_in_dep[1])  # Josh
                d = self.video.currentDepthFrame[y_dep][x_dep]
                Z = -1
                # Z = 0.1236*np.tan(d/2842.5 + 1.1863)*1000
                for idx in range(len(self.video.levels_mm)):
                    if d <= self.video.levels_upper_d[idx] and d >= self.video.levels_lower_d[idx]:
                        Z = 941 - self.video.levels_mm[idx]
                        break
                camera_coord = Z * cv2.undistortPoints(
                    xy_in_rgb, self.video.intrinsic, self.video.distortion_array)
                camera_coord_homogenous = np.transpose(
                    np.append(camera_coord, [Z, 1.0]))
                world_coord_homogenous = np.dot(self.video.extrinsic,
                                                camera_coord_homogenous)
                # camera_coord = Z*np.dot(self.video.inv_intrinsic, xy_in_rgb)
                # camera_coord_homo = np.concatenate((camera_coord, [1.]), axis=0)
                # world_coord_homo = np.dot(self.video.extrinsic, camera_coord_homo)
                self.ui.rdoutMouseWorld.setText(
                    "(%.0f,%.0f,%.0f)" % (world_coord_homogenous[0],
                                          world_coord_homogenous[1],
                                          world_coord_homogenous[2]))
                # self.ui.rdoutMouseWorld.setText("(%.0f,%.0f,%.0f)" % (x_dep, y_dep, d))
            else:
                self.ui.rdoutMouseWorld.setText("(-,-,-)")
        """
        Updates status label when rexarm playback is being executed.
        This can be extended to include other appropriate messages.
        """
        if (self.rex.plan_status == 1):
            self.ui.rdoutStatus.setText("Playing Back - Waypoint %d"
                                        % (self.rex.wpt_number + 1))
        self.ui.rdoutStatus.setText(
            self.statemachine.getmestatus(combined=True))

    def sliderChange(self):
        """
        Function to change the slider labels when sliders are moved
        and to command the arm to the given position.
        """
        self.ui.rdoutBase.setText(str(self.ui.sldrBase.value()))
        self.ui.rdoutTorq.setText(str(self.ui.sldrMaxTorque.value()) + "%")
        self.rex.torque_multiplier = self.ui.sldrMaxTorque.value() / 100.0
        self.rex.speed_multiplier = [self.ui.sldrSpeed.value() / 100.0
                                     ] * self.rex.num_joints
        self.rex.joint_angles[0] = self.ui.sldrBase.value() * D2R
        self.rex.joint_angles[1] = self.ui.sldrShoulder.value() * D2R
        self.rex.joint_angles[2] = self.ui.sldrElbow.value() * D2R
        self.rex.joint_angles[3] = self.ui.sldrWrist.value() * D2R
        if (self.rex.num_joints == 6):
            self.rex.joint_angles[4] = self.ui.sldrWrist.value() * D2R
            self.rex.joint_angles[5] = self.ui.sldrWrist.value() * D2R
        self.rex.cmd_publish()

    def sliderinitChange(self):  # Giri
        """
        Function to change the slider labels when sliders are moved
        and to command the arm to the given position.
        """
        self.ui.rdoutBase.setText(str(self.ui.sldrBase.value()))
        self.ui.rdoutShoulder.setText(str(self.ui.sldrShoulder.value()))  # Giri
        self.ui.rdoutElbow.setText(str(self.ui.sldrElbow.value()))  # Giri
        self.ui.rdoutWrist.setText(str(self.ui.sldrWrist.value()))  # Giri
        self.ui.rdoutGrip1.setText(str(self.ui.sldrGrip1.value()))  # Giri
        self.ui.rdoutGrip2.setText(str(self.ui.sldrGrip2.value()))  # Giri
        self.ui.rdoutTorq.setText(str(self.ui.sldrMaxTorque.value()) + "%")
        self.ui.rdoutSpeed.setText(str(self.ui.sldrSpeed.value()) + "%")
        self.rex.torque_multiplier = self.ui.sldrMaxTorque.value() / 100.0
        self.rex.speed_multiplier = [self.ui.sldrSpeed.value() / 100.0
                                     ] * self.rex.num_joints
        # Giri{
        self.rex.joint_angles[0] = self.ui.sldrBase.value() * D2R
        self.rex.joint_angles[1] = self.ui.sldrShoulder.value() * D2R
        self.rex.joint_angles[2] = self.ui.sldrElbow.value() * D2R
        self.rex.joint_angles[3] = self.ui.sldrWrist.value() * D2R
        if (len(self.rex.joint_angles) == 6):
            self.rex.joint_angles[4] = self.ui.sldrGrip1.value() * D2R
            self.rex.joint_angles[5] = self.ui.sldrGrip2.value() * D2R
        # Giri}
        self.rex.cmd_publish()

    # From Giri: I implemented all of these in the one function above; in case
    # the response time is bad, we can go back to the separate functions below.
    def sliderOtherChange(self):  # Giri
        self.ui.rdoutTorq.setText(str(self.ui.sldrMaxTorque.value()) + "%")
        self.ui.rdoutSpeed.setText(str(self.ui.sldrSpeed.value()) + "%")
        self.rex.torque_multiplier = self.ui.sldrMaxTorque.value() / 100.0
        self.rex.speed_multiplier = [self.ui.sldrSpeed.value() / 100.0
                                     ] * self.rex.num_joints
        self.rex.cmd_publish()

    def sliderBaseChange(self):  # Kevin
        self.ui.rdoutBase.setText(str(self.ui.sldrBase.value()))
        self.rex.joint_angles[0] = self.ui.sldrBase.value() * D2R
        self.rex.cmd_publish()

    def sliderShoulderChange(self):  # Kevin
        self.ui.rdoutShoulder.setText(str(self.ui.sldrShoulder.value()))
        self.rex.joint_angles[1] = self.ui.sldrShoulder.value() * D2R
        self.rex.cmd_publish()

    def sliderElbowChange(self):  # Kevin
        self.ui.rdoutElbow.setText(str(self.ui.sldrElbow.value()))
        self.rex.joint_angles[2] = self.ui.sldrElbow.value() * D2R
        self.rex.cmd_publish()

    def sliderWristChange(self):  # Kevin
        self.ui.rdoutWrist.setText(str(self.ui.sldrWrist.value()))
        self.rex.joint_angles[3] = self.ui.sldrWrist.value() * D2R
        self.rex.cmd_publish()

    def sliderGrip1Change(self):  # Giri
        self.ui.rdoutGrip1.setText(str(self.ui.sldrGrip1.value()))
        self.rex.joint_angles[4] = self.ui.sldrGrip1.value() * D2R
        self.rex.cmd_publish()

    def sliderGrip2Change(self):  # Giri
        self.ui.rdoutGrip2.setText(str(self.ui.sldrGrip2.value()))
        self.rex.joint_angles[5] = self.ui.sldrGrip2.value() * D2R
        self.rex.cmd_publish()

    def mousePressEvent(self, QMouseEvent):
        """ Function used to record mouse click positions for calibration """

        """ Get mouse position """
        x = QMouseEvent.x()
        y = QMouseEvent.y()

        """ If the mouse position is not over the camera image, ignore it """
        if ((x < MIN_X) or (x > MAX_X) or (y < MIN_Y) or (y > MAX_Y)):
            return

        """ Change coordinates to image axis """
        self.last_click[0] = x - MIN_X
        self.last_click[1] = y - MIN_Y

        """ If affine calibration is being performed """
        if (self.video.cal_flag == 1):
            """ Save last mouse coordinate """
            self.video.mouse_coord[self.video.mouse_click_id] = [(x - MIN_X),
                                                                 (y - MIN_Y)]

            """ Update the number of used points for calibration """
            self.video.mouse_click_id += 1

            """ Update status label text """
            self.ui.rdoutStatus.setText(
                "Affine Calibration: Click Point %d is recorded, please select Click Point %d"
                % (self.video.mouse_click_id, self.video.mouse_click_id + 1))

            """
            If the number of clicks is equal to the expected number of points,
            compute the affine calibration.
            LAB TASK: Change this code to use your workspace calibration
            routine and NOT the simple calibration function as is done now.
            """
            if (self.video.mouse_click_id == self.video.cal_points):
                """ Update status of calibration flag and number of mouse clicks """
                self.video.cal_flag = 2
                self.video.mouse_click_id = 0

                """ Perform affine calibration with OpenCV """
                self.video.aff_matrix = cv2.getAffineTransform(
                    self.video.mouse_coord, self.video.real_coord)
                # print self.video.mouse_coord  # Josh
                # print self.video.real_coord  # Josh

                """ Update status label to inform that calibration is done """
                self.ui.rdoutStatus.setText("Waiting for input")

        if (self.video.dep2rgb_aff_flag == 1):  # Josh
            if (self.video.mouse_click_id < self.video.rgb_pts_num):
                self.video.rgb_coord[self.video.mouse_click_id * 2] = x - MIN_X
                self.video.rgb_coord[self.video.mouse_click_id * 2 + 1] = y - MIN_Y
            else:
                self.video.dep_coord[2 * (self.video.mouse_click_id -
                                          self.video.rgb_pts_num)] = [
                    (x - MIN_X), (y - MIN_Y), 1, 0, 0, 0
                ]
                self.video.dep_coord[2 * (self.video.mouse_click_id -
                                          self.video.rgb_pts_num) + 1] = [
                    0, 0, 0, (x - MIN_X), (y - MIN_Y), 1
                ]
            self.video.mouse_click_id += 1
            print self.video.mouse_click_id
            if (self.video.mouse_click_id == self.video.dep_pts_num):
                self.video.dep2rgb_aff_flag = 2
                self.video.mouse_click_id = 0
                # Least-squares fit of the depth-to-RGB affine transform:
                # solve (A^T A) x = A^T b for the six affine parameters.
                A = self.video.dep_coord
                A_trans = np.transpose(A)
                A_AT = np.dot(A_trans, A)
                A_AT_inv = np.linalg.inv(A_AT)
                A_all = np.dot(A_AT_inv, A_trans)
                b = np.transpose(self.video.rgb_coord)
                tmp_dep2rgb_aff_array = np.dot(A_all, b)
                upper_dep2rgb_aff_matrix = np.reshape(tmp_dep2rgb_aff_array,
                                                      (2, 3))
                lower_dep2rgb_aff_matrix = np.array([[0, 0, 1]])
                self.video.dep2rgb_aff_matrix = np.concatenate(
                    (upper_dep2rgb_aff_matrix, lower_dep2rgb_aff_matrix),
                    axis=0)
                self.video.rgb2dep_aff_matrix = np.linalg.inv(
                    self.video.dep2rgb_aff_matrix)
                print self.video.rgb2dep_aff_matrix

        if (self.video.extrinsic_cal_flag == 1):  # Josh
            self.video.extrinsic_cal_pixel_coord[self.video.mouse_click_id] = [
                (x - MIN_X), (y - MIN_Y)
            ]
            self.video.mouse_click_id += 1
            if (self.video.mouse_click_id == 3):
                """
                retval, rvec, tvec = cv2.solvePnP(self.video.extrinsic_cal_world_coord,
                                                  self.video.extrinsic_cal_pixel_coord,
                                                  self.video.intrinsic,
                                                  self.video.distortion_array)
                # print self.video.extrinsic_cal_pixel_coord, self.video.extrinsic_cal_world_coord
                self.video.mouse_click_id = 0
                R, tmp = cv2.Rodrigues(rvec)
                tmp = np.concatenate((np.transpose(R), -1.0*tvec), axis=1)
                self.video.extrinsic = np.concatenate((tmp, np.float32([[0., 0., 0., 1.]])), axis=0)
                print self.video.extrinsic
                self.video.inv_extrinsic = np.linalg.inv(self.video.extrinsic)
                self.video.extrinsic_cal_flag = 2
                """
                self.video.mouse_click_id = 0
                tmp_coord = np.float32(
                    [[self.video.extrinsic_cal_pixel_coord[0]],
                     [self.video.extrinsic_cal_pixel_coord[1]],
                     [self.video.extrinsic_cal_pixel_coord[2]]])
                camera_coord = 941.0 * cv2.undistortPoints(
                    tmp_coord, self.video.intrinsic,
                    self.video.distortion_array)
                rot_rad = -1.0 * np.arctan2(
                    (camera_coord[1][0][1] - camera_coord[0][0][1]),
                    (camera_coord[1][0][0] - camera_coord[0][0][0]))
                if rot_rad < 0:
                    z_rot = rot_rad + 0.5 * np.pi
                else:
                    z_rot = 0.5 * np.pi - rot_rad
                R_T = np.array([[np.cos(z_rot), np.sin(z_rot), 0.0],
                                [np.sin(z_rot), -1.0 * np.cos(z_rot), 0.0],
                                [0.0, 0.0, -1.0]])
                R = np.transpose(R_T)
                T = np.float32([
                    (camera_coord[0][0][0] + camera_coord[2][0][0]) / 2,
                    (camera_coord[0][0][1] + camera_coord[2][0][1]) / 2, 941
                ])
                T = np.transpose(np.array([-1.0 * np.dot(R, T)]))
                tmp = np.concatenate((R, T), axis=1)
                self.video.extrinsic = np.concatenate(
                    (tmp, np.float32([[0., 0., 0., 1.]])), axis=0)
                # self.video.writecaldata(self.video.extrinsic)
                self.video.inv_extrinsic = np.linalg.inv(self.video.extrinsic)
                print "Extrinsic Matrix = ", self.video.extrinsic
                self.video.extrinsic_cal_flag = 2

    def deprgb_cali(self):  # Josh
        self.video.dep2rgb_aff_flag = 1
        self.ui.rdoutStatus.setText(
            "Start camera calibration by clicking mouse to select Click Point 1")

    def extrinsic_cali(self):  # Josh
        alpha = 525.91386088
        u0 = 312.41480582
        beta = 526.52064559
        v0 = 247.78962122
        matrix1 = np.float32([[1 / alpha, 0., 0.], [0., 1 / beta, 0.],
                              [0., 0., 1.0]])
        matrix2 = np.float32([[1.0, 0., -u0], [0., 1., -v0], [0., 0., 1.0]])
        self.video.inv_intrinsic = np.dot(matrix1, matrix2)
        self.video.intrinsic = np.float32([[alpha, 0., u0], [0., beta, v0],
                                           [0., 0., 1.]])
        self.video.distortion_array = np.float32([
            2.45479098e-01, -8.27672136e-01, 1.05870966e-03, -3.01947277e-03,
            1.08683931e+00
        ])
        self.video.extrinsic_cal_flag = 1

    def teach(self):  # Giri
        [x, y, z, orientation] = [0, 14, 1, 180.0]
        q = [0.0, 0.0, 0.0, 0.0]
        [q[0], q[1], q[2], q[3]] = inverseKinematics(x, y, z, orientation)
        print q
        self.rex.joint_angles[0] = q[0] * D2R
        self.rex.joint_angles[1] = q[1] * D2R
        self.rex.joint_angles[2] = q[2] * D2R
        self.rex.joint_angles[3] = q[3] * D2R
        self.rex.cmd_publish()

    def repeat(self):  # Giri
        if (self.ui.btnUser7.text() == "Repeat"):
            self.statemachine.init(self.ui, self.rex, "Initialising")
        else:
            self.statemachine.repeat(self.ui, self.rex, False)  # Giri

    def srepeat(self):  # Giri
        self.statemachine.init(self.ui, self.rex, "initialising")

    def reset(self, hold=False):
        self.rex.torque_multiplier = self.ui.sldrMaxTorque.value() / 100.0
        self.rex.speed_multiplier = [self.ui.sldrSpeed.value() / 100.0
                                     ] * self.rex.num_joints
        self.rex.joint_angles[0] = 0.0
        self.rex.joint_angles[1] = 0.0
        self.rex.joint_angles[2] = 0.0
        self.rex.joint_angles[3] = 0.0
        if (len(self.rex.joint_angles) == 6 and hold == False):
            self.rex.joint_angles[4] = 0.0
            self.rex.joint_angles[5] = 95 * D2R
        elif (len(self.rex.joint_angles) == 6 and hold == True):
            self.rex.joint_angles[4] = 0.0
            self.rex.joint_angles[5] = -95 * D2R
        self.rex.cmd_publish()

    def getheight(self, angle, action="picking"):
        placement_offset = 1  # in cm
        if (angle - 90 < 0.1):
            gripping_height = 3
        elif (angle - 120 < 0.1):
            gripping_height = 2
        else:
            if (action == "picking"):
                gripping_height = 0.05
            elif (action == "placing"):
                gripping_height = 0.05 + placement_offset
        return gripping_height

    def getIK(self, endCoord, angle=0, action="picking"):
        intermediate_height = 4
        intermediate_angles = [0.0] * 6
        angles = [0.0] * 6
        [angles[0], angles[1], angles[2], angles[3]] = inverseKinematics(
            endCoord[0], endCoord[1],
            endCoord[2] + self.getheight(endCoord[3], action), endCoord[3])
        if (self.checkfound(angles)):  # or self.checkfound(intermediate_angles, True)):
            if (abs(endCoord[3] - 90) < 0.1):
                endCoord[3] = 180
                [angles[0], angles[1], angles[2], angles[3]] = inverseKinematics(
                    endCoord[0], endCoord[1],
                    endCoord[2] + self.getheight(endCoord[3], action),
                    endCoord[3])
            else:
                endCoord[3] = 90
                [angles[0], angles[1], angles[2], angles[3]] = inverseKinematics(
                    endCoord[0], endCoord[1],
                    endCoord[2] + self.getheight(endCoord[3], action),
                    endCoord[3])
            if (self.checkfound(angles)):  # or self.checkfound(intermediate_angles, True)):
                endCoord[3] = 120
                [angles[0], angles[1], angles[2], angles[3]] = inverseKinematics(
                    endCoord[0] - 1, endCoord[1] + 1,
                    endCoord[2] + self.getheight(endCoord[3], action),
                    endCoord[3])
        if (endCoord[3] == 120):
            [intermediate_angles[0], intermediate_angles[1],
             intermediate_angles[2], intermediate_angles[3]] = inverseKinematics(
                endCoord[0] - 1, endCoord[1] + 1,
                endCoord[2] + intermediate_height,
                endCoord[3])  # assuming it is reachable
            if (self.checkfound(intermediate_angles)):
                [intermediate_angles[0], intermediate_angles[1],
                 intermediate_angles[2], intermediate_angles[3]] = inverseKinematics(
                    endCoord[0] - 1, endCoord[1] + 1,
                    endCoord[2] + intermediate_height,
                    90)  # assuming it is reachable
        if (endCoord[3] == 180):
            [intermediate_angles[0], intermediate_angles[1],
             intermediate_angles[2], intermediate_angles[3]] = inverseKinematics(
                endCoord[0], endCoord[1],
                endCoord[2] + intermediate_height +
                self.getheight(endCoord[3], action),
                endCoord[3])  # assuming it is reachable
            print self.trim_angle(orientation(angles[0], angle * R2D))
            if (action == "placing"):
                angles[4] = 90 + angles[0]
            else:
                angles[4] = -1 * self.trim_angle(
                    orientation(angles[0], angle * R2D))
        else:
            angles[4] = 0
            self.checkfound(intermediate_angles, True)
        intermediate_angles[4] = angles[4]
        angles[5] = endCoord[3]
        intermediate_angles[5] = endCoord[3]
        if (self.checkfound(angles) and self.checkfound(intermediate_angles, False)):
            print "Cannot be picked"
        return [angles, intermediate_angles]

    def checkfound(self, angles, i=False):
        if (i):
            if (angles[0] == 0 and angles[1] == 0 and angles[2] == 0
                    and angles[3] == 0):
                print "intermediate angles not found"
                return True
        else:
            if (angles[0] == 0 and angles[1] == 0 and angles[2] == 0
                    and angles[3] == 0):
                print "final angles not found"
                return True
        return False

    def printfk(self, angles):
        temp = forwardKinematics(angles[0], angles[1], angles[2], angles[3])
        print "Forward Kinematics output = ", str(temp[0]), str(temp[1]), str(temp[2]), str(temp[3])

    def roundoff(self, angles):
        angles[0] = round(self.trim_angle(angles[0]), 2)
        angles[1] = round(self.trim_angle(angles[1]), 2)
        angles[2] = round(self.trim_angle(angles[2]), 2)
        angles[3] = round(self.trim_angle(angles[3]), 2)
        angles[4] = round(self.trim_angle(angles[4]), 2)  # Don't alter these two angles
        angles[5] = round(self.trim_angle(angles[5]), 2)
        return angles

    def pick(self):  # Josh
        global putx, puty, putz, putangle
        blockx, blocky, blockz, angle = self.get_color_block_world_coord('blue')
        # self.statemachine.setorientation(angle*R2D)
        [putx, puty, putz, putangle] = [-1 * blockx, blocky, blockz, angle]
        # endCoord = [20.2, -16.1, 4.4, 90]
        # endCoord = [14.3, 19.4, 4.4, 90]
        endCoord = [
            blockx / 10, blocky / 10, (blockz) / 10, gripper_orientation
        ]
        [angles, intermediate_angles] = self.getIK(endCoord, angle)
        if (debug):
            print "Block detector output = ", [blockx, blocky, blockz, angle * R2D]
            print "Input to Inverse Kinematics:", endCoord
            print "Output of Inverse Kinematics: ", angles
            self.printfk(angles)
        angles = self.roundoff(angles)
        intermediate_angles = self.roundoff(intermediate_angles)
        self.statemachine.setq(angles, intermediate_angles)
        self.statemachine.setmystatus("testing", "picking", "picking")  # mode="testing", action="picking"
        # self.statemachine.hold(self.ui, self.rex, angles, "picking")

    def place(self):
        endCoord = [putx / 10, puty / 10, (putz) / 10, gripper_orientation]
        [angles, intermediate_angles] = self.getIK(endCoord, putangle)
        # wangle = (self.shortorientation((putangle)*R2D-45) - self.trim_angle(angles[0]))  # self.shortorientation((angle)*R2D+45)
        # self.statemachine.set_orientations(wangle, 2)
        if (debug):
            print "Placing according to Block detector output = ", [putx, puty, putz, putangle * R2D]
            print "Input to Inverse Kinematics:", endCoord
            self.printfk(angles)
        angles = self.roundoff(angles)
        intermediate_angles = self.roundoff(intermediate_angles)
        self.statemachine.setq(angles, intermediate_angles)
        self.statemachine.setmystatus("testing", "placing", "placing")  # mode="testing", action="placing"
        # self.statemachine.hold(self.ui, self.rex, angles, "placing")

    def get_color_block_world_coord(self, color):  # Josh
        self.video.blockDetector()
        [blockx_rgb, blocky_rgb, angle] = self.video.color_detection(color)
        if (blockx_rgb == 0 and blocky_rgb == 0 and angle == 0):
            return 0, 0, 0, 0
        xy_in_rgb = np.float32([[[blocky_rgb, blockx_rgb]]])  # Josh
        xy_in_rgb_homogenous = np.array([blocky_rgb, blockx_rgb, 1.0])
        xy_in_dep = np.dot(self.video.rgb2dep_aff_matrix, xy_in_rgb_homogenous)  # Josh
        x_dep = int(xy_in_dep[0])  # Josh
        y_dep = int(xy_in_dep[1])  # Josh
        d = self.video.currentDepthFrame[y_dep][x_dep]
        Z = -1
        for idx in range(len(self.video.levels_mm)):
            if d <= self.video.levels_upper_d[idx] and d >= self.video.levels_lower_d[idx]:
                Z = 941 - self.video.levels_mm[idx]
                break
        camera_coord = Z * cv2.undistortPoints(xy_in_rgb, self.video.intrinsic,
                                               self.video.distortion_array)
        camera_coord_homogenous = np.transpose(np.append(camera_coord, [Z, 1.0]))
        world_coord_homogenous = np.dot(self.video.extrinsic,
                                        camera_coord_homogenous)
        theta = np.arccos(self.video.extrinsic[0, 0])
        angle = angle - theta
        return world_coord_homogenous[0], world_coord_homogenous[1], world_coord_homogenous[2], angle

    def getangles(self, color):
        blockx, blocky, blockz, angle = self.get_color_block_world_coord(color)
        if (blockx == 0 and blocky == 0 and blockz == 0 and angle == 0):
            angles = [0] * 6
            intermediate_angles = [0] * 6
            print "not found"
            self.statemachine.set_comp5_status("not found")
        else:
            [current_mode, current_action, current_movement,
             current_motorstate] = self.statemachine.getmestatus()
            endCoord = [(blockx) / 10, (blocky) / 10, (blockz) / 10,
                        gripper_orientation]
            [angles, intermediate_angles] = self.getIK(endCoord, angle, "picking")
            angles = self.roundoff(angles)
            intermediate_angles = self.roundoff(intermediate_angles)
            print "found"
            self.statemachine.set_comp5_status("found")
        self.statemachine.setq(angles, intermediate_angles)

    def generatecomp2(self):
        print "Entered generatecomp2"
        new_q = np.zeros([3, 6])
        new_qh = np.zeros([3, 6])  # intermediate heights
        # blockx, blocky, blockz, angle = self.get_color_block_world_coord('red')
        final_x = 0
        final_y = -220
        blockz = 19.25
        # final_x = blockx
        # final_y = blocky
        height = 40
        # Stack three blocks at the same (x, y), each `height` higher than the last.
        for i in range(3):
            endCoord = [
                -(final_x) / 10, (final_y) / 10, (blockz + height * i) / 10,
                gripper_orientation
            ]
            [angles, intermediate_angles] = self.getIK(endCoord, 0, "placing")
            new_q[i] = self.roundoff(angles)
            new_qh[i] = self.roundoff(intermediate_angles)
        self.statemachine.setq_comp(new_q, new_qh)

    def generatecomp3(self):
        print "Entered generatecomp3"
        new_q = np.zeros([8, 6])
        new_qh = np.zeros([8, 6])  # intermediate heights
        # blockx, blocky, blockz, angle = self.get_color_block_world_coord('red')
        blockz = 19.5
        final_x = -150
        final_y = -89.5
        b = 43
        # Line up eight blocks along x, spaced `b` apart.
        for i in range(8):
            endCoord = [(final_x + b * i) / 10, (final_y) / 10, (blockz) / 10,
                        gripper_orientation]
            [angles, intermediate_angles] = self.getIK(endCoord, 0, "placing")
            new_q[i] = self.roundoff(angles)
            new_qh[i] = self.roundoff(intermediate_angles)
            if i == 1:
                print "Generated FK: ", forwardKinematics(new_q[1][0], new_q[1][1],
                                                          new_q[1][2], new_q[1][3])
        self.statemachine.setq_comp(new_q, new_qh)
    def generatecomp4(self):
        print "Entered generatecomp4"
        new_q = np.zeros([8, 6])
        new_qh = np.zeros([8, 6])  # intermediate heights
        blockx, blocky, blockz, angle = self.get_color_block_world_coord('blue')
        final_x = 0
        final_y = -220
        height = 40
        # Stack eight blocks at the same (x, y), each `height` higher than the last.
        for i in range(8):
            endCoord = [
                -(final_x) / 10, (final_y) / 10, (blockz + height * i) / 10,
                gripper_orientation
            ]
            [angles, intermediate_angles] = self.getIK(endCoord, 0, "placing")
            new_q[i] = self.roundoff(angles)
            new_qh[i] = self.roundoff(intermediate_angles)
            if i == 1:
                print "Generated FK: ", forwardKinematics(new_q[1][0], new_q[1][1],
                                                          new_q[1][2], new_q[1][3])
        # Hard-coded joint angles for the two highest levels.
        new_q[6] = [1., 2., 66.5, 15.2, 0, 90.]
        new_qh[6] = [1., 15.8, 29., 34.5, 0, 90.]
        new_q[7] = [1., 15.8, 29., 34.5, 0, 90.]
        new_qh[7] = [1., 23., 9., 34.5, 0, 90.]
        self.statemachine.setq_comp(new_q, new_qh)

    def generate_comp5(self):
        print "Entered generate_comp5"
        n = 3
        b = 6
        N = (n + 1) * n / 2
        coords = self.pyramid(n, b)
        new_q = np.zeros([N, 6])
        new_qh = np.zeros([N, 6])  # intermediate heights
        endCoord = np.zeros([N, 4])
        for i in range(0, N):
            endCoord[i] = [
                coords[0, i], coords[1, i], coords[2, i], gripper_orientation
            ]
            [new_q[i], new_qh[i]] = self.getIK(endCoord[i], 0, "placing")
        print "pyramid: ", coords
        self.statemachine.setq_comp(new_q, new_qh)

    def pyramid(self, nlayers, spacing):
        # returns a numpy array of block centre coordinates
        coords = np.zeros((3, nlayers * int((nlayers + 1) / 2)))
        count = 0
        blockheight = 3.85
        for j in range(0, nlayers):
            for i in range(0, nlayers - j):
                print(nlayers - j - 1)
                coords[0][count] = 0 + spacing * (i - ((nlayers - j) / 2.0))
                coords[1][count] = -11
                coords[2][count] = (blockheight * j)  # define blockheight and offset
                count = count + 1
        return coords

    def competition(self):
        if (self.ui.btnUser11.text() == "Enter Competition Mode"):
            self.ui.btnUser11.setText("Enter Testing Mode")
            self.ui.btnUser7.setText("Draw")
            self.ui.btnUser1.setText("Competition 1")
            self.ui.btnUser2.setText("Competition 2")
            self.ui.btnUser3.setText("Competition 3")
            self.ui.btnUser4.setText("Competition 4")
            self.ui.btnUser5.setText("Competition 5")
            self.ui.btnUser2.setEnabled(True)
            self.ui.btnUser7.setEnabled(True)
        elif (self.ui.btnUser11.text() == "Enter Testing Mode"):
            self.ui.btnUser11.setText("Enter Competition Mode")
            self.ui.btnUser7.setText("Smooth Repeat")
            self.ui.btnUser1.setText("Configure Servos")
            self.ui.btnUser2.setText("Depth and RGB Calibration")
            self.ui.btnUser3.setText("Extrinsic Calibration")
            self.ui.btnUser4.setText("Block Detection")
            self.ui.btnUser5.setText("Repeat")
            self.ui.btnUser2.setEnabled(False)
            self.ui.btnUser7.setEnabled(False)

    def btn1(self):
        if (self.ui.btnUser1.text() == "Configure Servos"):
            self.rex.cfg_publish_default()
        elif (self.ui.btnUser1.text() == "Competition 1"):
            if (self.sanity_check(1)):
                self.getangles('red')
                self.statemachine.setmystatus("Competition 1", "picking", "picking")  # mode, action

    def btn2(self):
        if (self.ui.btnUser2.text() == "Depth and RGB Calibration"):
            self.deprgb_cali()
        elif (self.ui.btnUser2.text() == "Competition 2"):
            if (self.sanity_check(2)):
                self.generatecomp2()
                self.getangles('red')
                self.statemachine.setmystatus("Competition 2", "picking", "picking")  # mode, action

    def btn3(self):
        global declutter_index, declutter_competition
        if (self.ui.btnUser3.text() == "Extrinsic Calibration"):
            self.extrinsic_cali()
        elif (self.ui.btnUser3.text() == "Competition 3"):
            declutter_index = 0
            declutter_competition = "Competition 3"
            self.declutter()
            """
            if(self.sanity_check(3)):
                self.generatecomp3()
                self.getangles('black')
                self.statemachine.setmystatus("Competition 3", "picking", "picking")
            """

    def btn4(self):
        global declutter_index, declutter_competition
        if (self.ui.btnUser4.text() == "Block Detector"):
            self.video.blockDetector()
        elif (self.ui.btnUser4.text() == "Competition 4"):
            declutter_index = 0
            declutter_competition = "Competition 4"
            self.declutter()
            """
            if(self.sanity_check(4)):
                self.generatecomp4()
                self.getangles('black')
                self.statemachine.setmystatus("Competition 4", "picking", "picking")
            """

    def btn5(self):
        if (self.ui.btnUser5.text() == "Repeat"):
            self.repeat()
        elif (self.ui.btnUser5.text() == "Competition 5"):
            self.generate_comp5()
            self.getangles('all')
            self.statemachine.setmystatus("Competition 5", "picking", "picking")  # mode, action

    def declutter(self):
        blockx, blocky, blockz, angle = self.video.search_highest()
        levels_mm = np.float32(
            [0., 19.25, 57.75, 96.25, 134.75, 173.25, 211.75, 250.25])
        level = (blockz - 19.25) / 38.5
        print "level = ", level, "and levels_mm = ", levels_mm
        if (declutter_index == 2):
            if (declutter_competition == "Competition 3"):
                if (self.sanity_check(3)):
                    self.generatecomp3()
                    self.getangles('black')
                    self.statemachine.setmystatus("Competition 3", "picking", "picking")
            elif (declutter_competition == "Competition 4"):
                if (self.sanity_check(4)):
                    self.generatecomp4()
                    self.getangles('black')
                    self.statemachine.setmystatus("Competition 4", "picking", "picking")
        if (level >= 1 and declutter_index < 4):
            endCoord = [(blockx) / 10, (blocky) / 10, (blockz) / 10,
                        gripper_orientation]
            print endCoord
            [angles, intermediate_angles] = self.getIK(endCoord, 0, "picking")
            print "from declutter IK output: ", [angles, intermediate_angles]
            angles = self.roundoff(angles)
            intermediate_angles = self.roundoff(intermediate_angles)
            if (not self.checkfound(angles)):  # and not self.checkfound(intermediate_angles, True)):
                self.generatedeclutter()
                self.statemachine.setq(angles, intermediate_angles)
                self.statemachine.setmystatus("Declutter", "picking", "picking")
        else:
            if (declutter_competition == "Competition 3"):
                if (self.sanity_check(3)):
                    self.generatecomp3()
                    self.getangles('black')
                    self.statemachine.setmystatus("Competition 3", "picking", "picking")
            elif (declutter_competition == "Competition 4"):
                if (self.sanity_check(4)):
                    self.generatecomp4()
                    self.getangles('black')
                    self.statemachine.setmystatus("Competition 4", "picking", "picking")

    def generatedeclutter(self):
        print "Entered generatedeclutter"
        global declutter_index
        if (declutter_index < 4):
            x = [0, 250, 0, 180]
            y = [-240, -50, 240, -150]
            final_x = x[declutter_index]
            final_y = y[declutter_index]
            final_z = 19.25
            height = 20
            endCoord = [(final_x) / 10, (final_y) / 10, (final_z + height) / 10,
                        gripper_orientation]
            [angles, intermediate_angles] = self.getIK(endCoord, 0, "placing")
            new_q = self.roundoff(angles)
            new_qh = self.roundoff(intermediate_angles)
            declutter_index = declutter_index + 1
            self.statemachine.setq_comp(new_q, new_qh)

    def sanity_check(self, comp):
        success = False
        if (comp == 1 or comp == 2):  # search for red, blue, green
            colors = ["red", "blue", "green"]
            [pos_colors, success] = self.video.blockDetector(colors)
        elif (comp == 4 or comp == 3):  # search for all eight block colors
            colors = [
                "red", "blue", "green", "orange", "violet", "black", "yellow",
                "pink"
            ]
            [pos_colors, success] = self.video.blockDetector(colors)
        return success


"""main function"""
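# The docstring above marks the program entry point; the original main body is
# not included in this excerpt. A minimal sketch of a typical PyQt4 entry point
# for this GUI is shown below as a comment (it assumes `import sys` at module
# level; the exact original main is an assumption, not the project's code):
#
# def main():
#     app = QtGui.QApplication(sys.argv)
#     app_window = Gui()
#     app_window.show()
#     sys.exit(app.exec_())
#
# if __name__ == '__main__':
#     main()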
class Gui(QtGui.QMainWindow):
    """
    Main GUI Class
    Contains the main function and interfaces between the GUI and functions.
    """

    def __init__(self, parent=None):
        QtGui.QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        """ Main Variables Using Other Classes """
        self.rex = Rexarm()
        self.video = Video()
        self.statemachine = Statemachine()
        self.pos_idx_teach = 0

        """ Other Variables """
        self.last_click = np.float32([0, 0])
        self.check_move_time = 60
        self.check_time = 70
        self.check_move_counter = 0
        self.d_t = 10. * self.check_move_time / 1000.
        self.spe_coef = [0.0] * self.rex.num_joints
        self.pos_pre = [0.0] * self.rex.num_joints
        self.plan_point = 0
        self.move_to_point_cn = 0
        self.event3_color_check = [0] * 8
        self.event3_finish = 0
        self.event4_color_idx = 0
        self.vel = []
        self.event4_f_idx = 0
        self.event5_block_num = 0

        """ Set GUI to track mouse """
        QtGui.QWidget.setMouseTracking(self, True)

        """
        GUI timer
        Creates a timer and calls update_gui() according to the given
        time delay (27 ms).
        """
        self._timer = QtCore.QTimer(self)
        self._timer.timeout.connect(self.update_gui)
        self._timer.start(27)

        """
        LCM Arm Feedback timer
        Creates a timer to call LCM handler for Rexarm feedback.
        """
        self._timer2 = QtCore.QTimer(self)
        self._timer2.timeout.connect(self.rex.get_feedback)
        self._timer2.start()

        # self._timer_setplan = QtCore.QTimer(self)
        # self._timer_setplan.timeout.connect(self.setplan)
        # self._timer_teach = QtCore.QTimer(self)
        # self._timer_teach.timeout.connect(self.check_move)
        self._timer_waypoint = QtCore.QTimer(self)
        self._timer_waypoint.timeout.connect(self.move_to_point)
        self._timer_event1 = QtCore.QTimer(self)
        self._timer_event2 = QtCore.QTimer(self)
        self._timer_event3 = QtCore.QTimer(self)
        self._timer_event4 = QtCore.QTimer(self)
        self._timer_event5 = QtCore.QTimer(self)

        """
        Connect Sliders to Function
        TODO: CONNECT THE OTHER 5 SLIDERS IMPLEMENTED IN THE GUI
        """
        self.ui.sldrBase.valueChanged.connect(self.sliderChange)
        self.ui.sldrShoulder.valueChanged.connect(self.sliderChange)
        self.ui.sldrElbow.valueChanged.connect(self.sliderChange)
        self.ui.sldrSpeed.valueChanged.connect(self.sliderChange)
        self.ui.sldrWrist.valueChanged.connect(self.sliderChange)
        self.ui.sldrGrip1.valueChanged.connect(self.sliderChange)
        self.ui.sldrGrip2.valueChanged.connect(self.sliderChange)
        self.ui.sldrMaxTorque.valueChanged.connect(self.sliderChange)

        """ Initial command of the arm to home position """
        self.sliderChange()

        """
        Connect Buttons to Functions
        TODO: NAME AND CONNECT BUTTONS AS NEEDED
        """
        self.ui.btnUser1.setText("Configure Servos")
        self.ui.btnUser1.clicked.connect(self.rex.cfg_publish_default)
        self.ui.btnUser2.setText("Simple Calibration")
        self.ui.btnUser2.clicked.connect(self.simple_cal)
        # self.ui.btnUser3.setText("start teach and repeat")
        # self.ui.btnUser3.clicked.connect(self.teach_repeat_start)
        # self.ui.btnUser4.setText("record position")
        # self.ui.btnUser4.clicked.connect(self.teach_repeat_record)
        # self.ui.btnUser5.setText("play teach and repeat")
        # self.ui.btnUser5.clicked.connect(self.teach_repeat_play)
        # self.ui.btnUser6.setText("save rgb")
        # self.ui.btnUser6.clicked.connect(self.save_rgb)
        self.ui.btnUser3.setText("EVENT1")
        self.ui.btnUser3.clicked.connect(self.event1)
        self.ui.btnUser4.setText("EVENT2")
        self.ui.btnUser4.clicked.connect(self.event2)
        self.ui.btnUser5.setText("EVENT3")
        self.ui.btnUser5.clicked.connect(self.event3)
        self.ui.btnUser6.setText("EVENT4")
        self.ui.btnUser6.clicked.connect(self.event4)
        self.ui.btnUser7.setText("EVENT5")
        self.ui.btnUser7.clicked.connect(self.event5)
        self.ui.btnUser8.setText("depth test")
        self.ui.btnUser8.clicked.connect(self.video.blockDetector)
        self.ui.btnUser9.setText("recover")
        self.ui.btnUser9.clicked.connect(self.recover)
        self.ui.btnUser10.setText("click put")
        self.ui.btnUser10.clicked.connect(self.click_put)
        self.ui.btnUser12.setText("Emergency Stop!")
        self.ui.btnUser12.clicked.connect(self.estop)

    # def save_rgb(self):
    #     img = cv2.cvtColor(self.video.currentVideoFrame, cv2.COLOR_RGB2BGR)
    #     cv2.imwrite('real_example.png', img)

    # def teach_repeat_start(self):
    #     print "i enter"
    #     global Record_Value
    #     Record_Value = [[]]
    #     self.statemachine.idle(self.rex)
    #     # self.statemachine.moving(self.rex, self.rex.rexarm_IK([207, 14, 30, np.pi/2], 1) + [0.0, -10], [0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
    #     # print self.rex.rexarm_IK([-100, -100, 40, np.pi/2], 1)

    # def teach_repeat_record(self):
    #     temp_record = [self.rex.joint_angles_fb[0],
    #                    self.rex.joint_angles_fb[1],
    #                    self.rex.joint_angles_fb[2],
    #                    self.rex.joint_angles_fb[3],
    #                    self.rex.joint_angles_fb[4]]
    #     if len(Record_Value[0]) == 0:
    #         Record_Value[0] = temp_record
    #     else:
    #         Record_Value.append(temp_record)

    # def teach_repeat_play(self):
    #     print Record_Value
    #     self._timer_teach.start(self.check_time)

    # def check_move(self):
    #     print "111111"
    #     if len(Record_Value[0]) == 0:
    #         self.check_move_counter = 0
    #         self._timer_teach.stop()
    #         return
    #     if self.check_move_counter == 0:
    #         T = np.array([[1, 0, 0, 0, 0, 0],
    #                       [0, 1, 0, 0, 0, 0],
    #                       [0, 0, 2, 0, 0, 0],
    #                       [1, self.d_t, self.d_t**2, self.d_t**3, self.d_t**4, self.d_t**5],
    #                       [0, 1, 2*self.d_t, 3*self.d_t**2, 4*self.d_t**3, 5*self.d_t**4],
    #                       [0, 0, 2, 6*self.d_t, 12*self.d_t**2, 20*self.d_t**3]])
    #         for i in range(self.rex.num_joints):
    #             self.spe_coef[i] = np.dot(np.linalg.inv(T),
    #                                       np.array([[self.rex.joint_angles_fb[i]], [0.01], [0],
    #                                                 [Record_Value[self.pos_idx_teach][i]], [0.1], [0.]]))
    #         print "haha"
    #         print Record_Value[self.pos_idx_teach]
    #         print "fb"
    #         print self.rex.joint_angles_fb
    #     self.check_move_counter += 1
    #     pos = [0.] * self.rex.num_joints
    #     speed = [0.] * self.rex.num_joints
    #     for i in range(self.rex.num_joints):
    #         pos[i] = np.polynomial.polynomial.polyval((self.check_move_counter * self.check_move_time / 1000), self.spe_coef[i])
    #         if i == 3:
    #             speed[i] = abs(np.polynomial.polynomial.polyval((self.check_move_counter * self.check_move_time / 1000),
    #                                                             np.polynomial.polynomial.polyder(self.spe_coef[i]))) / SPEED_AX12
    #         else:
    #             speed[i] = abs(np.polynomial.polynomial.polyval((self.check_move_counter * self.check_move_time / 1000),
    #                                                             np.polynomial.polynomial.polyder(self.spe_coef[i]))) / SPEED_MX28 * 20
    #     if self.check_move_counter * self.check_move_time / 1000. <= self.d_t and \
    #             np.linalg.norm(np.transpose(np.asarray(Record_Value[self.pos_idx_teach])) - np.transpose(np.asarray(self.rex.joint_angles_fb))) > 0.1:
    #         # and np.linalg.norm(Record_Value[self.pos_idx_teach] - np.asarray(self.rex.joint_angles_fb)) > 0.1
    #         if self.pos_pre != pos:
    #             self.statemachine.moving(self.rex, pos, speed)
    #             self.pos_pre = pos
    #             print "pos"
    #             print pos
    #             print "feedback"
    #             print self.rex.joint_angles_fb
    #             print "speed"
    #             print speed
    #         print self.check_move_counter
    #         # self.check_move_counter += 1
    #         if np.linalg.norm(np.transpose(np.asarray(pos)) - np.transpose(np.asarray(self.rex.joint_angles_fb)), axis=1) < 0.01:
    #             self.check_move_counter += 1
    #     else:
    #         self.check_move_counter = 0
    #         if self.pos_idx_teach < len(Record_Value) - 1:
    #             self.pos_idx_teach += 1
    #         else:
    #             print "i stop"
    #             self.pos_idx_teach = 0
    #             self._timer_teach.stop()
    #     print "222222"

    # def check_move(self):
    #     pos = 0
    #     if len(Record_Value[0]) == 0:
    #         self._timer_teach.stop()
    #         return
    #     print len(Record_Value[self.pos_idx_teach])
    #     print np.linalg.norm(np.asarray(Record_Value[self.pos_idx_teach]) - np.asarray(self.rex.joint_angles_fb[0:5]))
    #     if np.linalg.norm(np.transpose(np.asarray(Record_Value[self.pos_idx_teach])) - np.transpose(np.asarray(self.rex.joint_angles_fb[0:5]))) > 0.04:
    #         pos = Record_Value[self.pos_idx_teach] + [0]
    #         self.statemachine.moving(self.rex, pos, [0.1, 0.1, 0.1, 0.1, 0.1, 0.1])
    #         print pos
    #     else:
    #         if self.pos_idx_teach < len(Record_Value) - 1:
    #             self.pos_idx_teach += 1
    #         else:
    #             print "i stop"
    #             self.pos_idx_teach = 0
    #             self._timer_teach.stop()

    def recover(self):
        self.rex.moving_status = 0

    def move_to_point(self):
        pos = 0
        if len(self.rex.plan) == 0:
            self._timer_waypoint.stop()
            return
        # print len(self.rex.plan[self.pos_idx_teach])
        # print self.check_arrival(self.rex.plan[self.pos_idx_teach], self.rex.joint_angles_fb)
        if self.check_arrival(self.rex.plan[self.pos_idx_teach],
                              self.rex.joint_angles_fb) > 0.07 and self.move_to_point_cn < 40:
            pos = self.rex.plan[self.pos_idx_teach]
            vel = self.vel[self.pos_idx_teach]
            self.statemachine.moving(self.rex, pos, vel)
            self.move_to_point_cn += 1
            # print "load load"
            # print self.rex.load_fb
            # print "going to pos:"
            # print pos
        else:
            if self.move_to_point_cn >= 70:
                print "cant move to that point within %d s" % (self.check_time * self.move_to_point_cn)
            self.move_to_point_cn = 0
            if self.pos_idx_teach < len(self.rex.plan) - 1:
                self.pos_idx_teach += 1
            else:
                print "i stop"
                self.pos_idx_teach = 0
                self.rex.plan = []
                self.rex.plan_status = 0
                self._timer_waypoint.stop()

    def click_grab(self):
        x = self.last_click[0]
        y = self.last_click[1]
        z = 0.
        world_z = 0.
rw = np.array([[0], [0], [0]]) M = self.video.aff_matrix M_RGBtoDEP = self.video.aff_matrix_RGBtoDEP v = np.array([[x], [y], [1]]) DEP_xy = np.dot(M_RGBtoDEP, v) if ((DEP_xy[0] >= 0) and (DEP_xy[0] <= 640) and (DEP_xy[1] >= 0) and (DEP_xy[1] <= 480)): world_z = self.video.depth - 123.6 * np.tan( self.video.currentDepthFrame[int(DEP_xy[1])][int(DEP_xy[0])] / 2842.5 + 1.1863) z = self.video.currentDepthFrame[int(DEP_xy[1])][int(DEP_xy[0])] rw = np.dot( np.linalg.inv(self.video.WORLDtoRGB[:, [0, 1, 3]]), np.subtract(v, self.video.WORLDtoRGB[:, 2:3] * (-world_z))) else: z = 0.0 if rw[2] != 0: # self.statemachine.moving(self.rex, self.rex.rexarm_IK([rw[0]/rw[2],rw[1]/rw[2], world_z + 30, np.pi/2],1) + [0.0, -10], [0.1,0.1,0.1,0.1,0.1,0.1]) t = self.rex.rexarm_IK( [rw[0] / rw[2], rw[1] / rw[2], world_z - 9, np.pi], 1) self.rex.plan = [[t[0], -200, -200, -200, 0, -50 * D2R], [-200, -200, t[2], t[3], 0, -200], [-200, t[1], -200, -200, 0, -200], [-200, -200, -200, -200, 0, 7 * D2R], [-200, 0, -200, -200, -200, -200], [0, 0, 0, 0 * D2R, 0, -200]] self._timer_waypoint.start(self.check_time) print "plan" print self.rex.plan else: print "try again" def click_put(self): x = self.last_click[0] y = self.last_click[1] z = 0. world_z = 0. rw = np.array([[0], [0], [0]]) M = self.video.aff_matrix M_RGBtoDEP = self.video.aff_matrix_RGBtoDEP v = np.array([[x], [y], [1]]) DEP_xy = np.dot(M_RGBtoDEP, v) if ((DEP_xy[0] >= 0) and (DEP_xy[0] <= 640) and (DEP_xy[1] >= 0) and (DEP_xy[1] <= 480)): world_z = self.video.depth - 123.6 * np.tan( self.video.currentDepthFrame[int(DEP_xy[1])][int(DEP_xy[0])] / 2842.5 + 1.1863) z = self.video.currentDepthFrame[int(DEP_xy[1])][int(DEP_xy[0])] rw = np.dot( np.linalg.inv(self.video.WORLDtoRGB[:, [0, 1, 3]]), np.subtract(v, self.video.WORLDtoRGB[:, 2:3] * (-world_z))) else: z = 0.0 if rw[2] != 0: # self.statemachine.moving(self.rex, self.rex.rexarm_IK([rw[0]/rw[2],rw[1]/rw[2], world_z + 30, np.pi/2],1) + [0.0, -10], [0.1,0.1,0.1,0.1,0.1,0.1]) t = self.rex.rexarm_IK( [rw[0] / rw[2], rw[1] / rw[2], world_z + 40, np.pi / 2], 1) self.rex.plan = [[t[0], -200, -200, -200, 0, -200], [-200, -200, t[2], t[3], 0, -200], [-200, t[1], -200, -200, 0, -200], [-200, -200, -200, -200, 0, -20 * D2R], [-200, 0, -200, -200, -200, -200], [0, 0, 0, 0 * D2R, 0, -200]] self._timer_waypoint.start(self.check_time) print "plan" print self.rex.plan else: print "try again" def check_arrival(self, pos0, pos1): t = 0 for i in range(len(pos0)): if pos0[i] != -200: if i != 5: t = t + (pos0[i] - pos1[i])**2 else: t = t + 0.25 * (pos0[i] - pos1[i])**2 return np.sqrt(t) def event1(self): print "entering event1" self.video.det_pts_ = [] self.video.blockDetector() self.plan_point = 0 self._timer_event1.timeout.connect(self.event1_timer) self._timer_event1.start(self.check_time) def event2(self): print "entering event2" self._timer_event2.stop() self.video.det_pts_ = [] self.video.blockDetector() self.plan_point = 0 self._timer_event2.timeout.connect(self.event2_timer) self._timer_event2.start(self.check_time) def event3(self): print "entering event3" # self.video.blockDetector() self.video.det_pts_ = [] self.event3_finish = 0 self.plan_point = 0 self.event3_color_check = [0] * 8 self._timer_event3.timeout.connect(self.event3_timer) self._timer_event3.start(self.check_time) def event4(self): print "entering event4" # self.video.blockDetector() self.video.det_pts_ = [] self.plan_point = 0 self.event4_color_idx = 0 self.event4_f_idx = 0 self._timer_event4.timeout.connect(self.event4_timer) 
self._timer_event4.start(self.check_time) def event5(self): print "entering event5" # self.video.blockDetector() self.video.det_pts_ = [] self.plan_point = 0 self.event5_block_num = 0 self._timer_event5.timeout.connect(self.event5_timer) self._timer_event5.start(self.check_time) def event1_timer(self): if self.rex.moving_status == 1: self._timer_event1.stop() self.rex.plan = [] self.vel = [] self._timer_waypoint.stop() if len(self.video.det_pts_) == 0: print "no block detected" self.plan_point = 0 self._timer_event1.stop() else: if self.rex.plan_status == 0 and self.plan_point < len( self.video.det_pts_): x = self.video.det_pts_[self.plan_point].rgb_real_pos[0] y = self.video.det_pts_[self.plan_point].rgb_real_pos[1] print "grapping %d %d %d" % (self.plan_point, x, y) world_z = self.video.det_pts_[self.plan_point].depth rw = np.array([[0], [0], [0]]) M = self.video.aff_matrix M_RGBtoDEP = self.video.aff_matrix_RGBtoDEP v = np.array([[x], [y], [1]]) t = self.rex.rexarm_IK([x, y, world_z, np.pi / 2], 1) t1 = self.rex.rexarm_IK([x, -y, world_z, np.pi / 2], 1) # if t[0] < 0: # t[0] = np.pi + t[0] # t[1] = 0 - t[1] # t[2] = 0 - t[2] # t[3] = 0 - t[3] # if t1[0] < 0: # t1[0] = np.pi + t1[0] # t1[1] = 0 - t1[1] # t1[2] = 0 - t1[2] # t1[3] = 0 - t1[3] # self.rex.plan = [[t[0],-200,-200,-200,0,-30*D2R],[-200,-200,t[2],t[3],0,-200],[-200,t[1],-200,-200,0,-200],[-200,-200,-200,-200,0,10*D2R],[-200,0,-200,-200,-200,-200],[-200,0,0,0*D2R,0,-200]] self.rex.plan = [[t[0], -200, t[2], t[3], 0, -30 * D2R], [-200, t[1], -200, -200, 0, -200], [-200, -200, -200, -200, 0, 8 * D2R], [ -200, t1[1] - 55 * D2R if t1[1] - 55 * D2R > 0 else 0, -200, -200, -200, -200 ]] # t = self.rex.rexarm_IK([x,-y, world_z, np.pi/2],1) self.rex.plan.extend( [[t1[0], -200, -200, -200, 0, -200], [-200, -200, t1[2], t1[3], 0, -200], [-200, t1[1], -200, -200, 0, -200], [-200, -200, -200, -200, 0, -15 * D2R], [ -200, t1[1] - 55 * D2R if t1[1] - 55 * D2R > 0 else 0, t1[2] + 5 * D2R, -200, -200, -200 ]]) self.vel = [[0.3, 0.25, 0.15, 0.15, 0.7, 0.8]] * len( self.rex.plan) self.plan_point += 1 self._timer_waypoint.start(self.check_time) self.rex.plan_status = 1 elif self.plan_point == len( self.video.det_pts_) and self.rex.plan_status == 0: print "event1 stops" self._timer_event1.stop() self.rex.plan = [] self.vel = [] else: pass def event2_timer(self): if self.rex.moving_status == 1: self._timer_event2.stop() self.rex.plan = [] self._timer_waypoint.stop() if len(self.video.det_pts_) == 0: print "no block detected" self.plan_point = 0 self._timer_event2.stop() else: if self.rex.plan_status == 0 and self.plan_point < len( self.video.det_pts_): x = self.video.det_pts_[self.plan_point].rgb_real_pos[0] y = self.video.det_pts_[self.plan_point].rgb_real_pos[1] print "grapping %d %d %d" % (self.plan_point, x, y) world_z = self.video.det_pts_[self.plan_point].depth rw = np.array([[0], [0], [0]]) M = self.video.aff_matrix M_RGBtoDEP = self.video.aff_matrix_RGBtoDEP v = np.array([[x], [y], [1]]) t = self.rex.rexarm_IK([x, y, world_z, np.pi / 2], 1) t1 = self.rex.rexarm_IK( [EVEN2_X, EVEN2_Y, 25 + 38 * self.plan_point, np.pi / 2], 1) self.rex.plan = [[t[0], -200, t[2], t[3], 0, -20 * D2R], [-200, t[1], -200, -200, 0, -200], [-200, -200, -200, -200, 0, 8 * D2R], [ -200, t1[1] - 55 * D2R if t1[1] - 55 * D2R > 0 else 0, t1[2], t1[3], -200, -200 ]] self.rex.plan.extend( [[t1[0], -200, t1[2], t1[3], 0, -200], [-200, t1[1], -200, -200, 0, -200], [-200, -200, -200, -200, 0, -20 * D2R], [ -200, t1[1] - 55 * D2R if t1[1] - 55 * D2R > 0 else 0, t1[2] + 5 * 
D2R, -200, -200, -200 ]]) self.vel = [[0.3, 0.25, 0.15, 0.15, 0.7, 0.8]] * len( self.rex.plan) self.plan_point += 1 self._timer_waypoint.start(self.check_time) self.rex.plan_status = 1 elif self.plan_point == len( self.video.det_pts_) and self.rex.plan_status == 0: print "event2 stops" self._timer_event2.stop() self.rex.plan = [] self.vel = [] else: pass def event3_timer(self): if self.rex.moving_status == 1: self._timer_event3.stop() self.rex.plan = [] self._timer_waypoint.stop() if len(self.video.det_pts_) == 0: print "begin to detect" self.video.blockDetector() points_temp = [] for points_i in self.video.det_pts_: if self.event3_color_check[ points_i.color] == 0 and points_i.rgb_real_pos[0] < 0: self.event3_color_check[points_i.color] = 1 points_temp.append(points_i) self.video.det_pts_ = points_temp self.plan_point = 0 if len(self.video.det_pts_) == 0: if self.event3_finish == 0 and self.rex.plan_status == 0: print "pushing blocks" self.rex.plan = [[ -115 * D2R, 68 * D2R, 19 * D2R, 76 * D2R, -115 * D2R, 0 ], [ -122 * D2R, 57 * D2R, 47 * D2R, 76 * D2R, -122 * D2R, 0 ]] self.vel = [[0.2, 0.1, 0.05, 0.05, 0.3, 0.3]] * len( self.rex.plan) self._timer_waypoint.start(self.check_time) self.rex.plan_status = 1 self.event3_finish = 1 elif self.event3_finish == 1 and self.rex.plan_status == 0: print "event finishes" self._timer_event3.stop() self.rex.plan = [] self.vel = [] return # self._timer_event.stop() if self.rex.plan_status == 0 and self.plan_point < len( self.video.det_pts_): x = self.video.det_pts_[self.plan_point].rgb_real_pos[0] y = self.video.det_pts_[self.plan_point].rgb_real_pos[1] print "grapping %d %d %d" % (self.plan_point, x, y) world_z = self.video.det_pts_[self.plan_point].depth rw = np.array([[0], [0], [0]]) M = self.video.aff_matrix M_RGBtoDEP = self.video.aff_matrix_RGBtoDEP v = np.array([[x], [y], [1]]) if np.arctan2(y, x) < -np.pi / 2 and np.arctan2( y, x) > -3 * np.pi / 4: t = self.rex.rexarm_IK([x, y, world_z, np.pi / 2], 1) else: t = self.rex.rexarm_IK([x, y, world_z - 5, np.pi / 2], 1) self.rex.plan = [[t[0] - 2 * D2R, -200, -200, -200, 0, -30 * D2R], [-200, -200, t[2], t[3], 0, -200], [-200, t[1], -200, -200, 0, -200], [-200, -200, -200, -200, 0, 8 * D2R], [-200, 0, -200, -200, -200, -200], [-200, 0, 0, 0 * D2R, 0, -200]] self.vel = [[0.2, 0.15, 0.13, 0.13, 0.7, 0.7]] * len(self.rex.plan) color_idx = self.video.det_pts_[self.plan_point].color t = self.rex.rexarm_IK( [EVENT3_X, EVENT3_Y[color_idx], 24 + 38, np.pi / 2], 1) t1 = self.rex.rexarm_IK( [EVENT3_X, EVENT3_Y[color_idx], 24, np.pi / 2], 1) # self.rex.plan.extend([[t[0],-200,-200,-200,t[0],-200],[-200,-200,t[2],t[3],-200,-200],[-200,t[1],-200,-200,-200,-200],[-200,-200,-200,-200,-200,-50*D2R],[-200,0,-200,-200,-200,-200],[-200,0,0,0*D2R,0,-200]]) t4 = t[0] + np.pi / 2 if t[0] < 0 else t[0] - 3 * np.pi / 2 self.rex.plan.extend([[t[0], -200, -200, -200, t4, -200], [-200, -200, t[2], t[3], -200, -200], [-200, t[1], -200, -200, -200, -200], [t1[0], t1[1], t1[2], t1[3], t4, -200], [-200, -200, -200, -200, -200, -50 * D2R], [-200, 0, -200, -200, -200, -200], [-200, 0, 0, 0 * D2R, 0, -200]]) self.vel.extend([[0.2, 0.15, 0.13, 0.13, 0.7, 0.7], [0.2, 0.15, 0.13, 0.13, 0.7, 0.7], [0.2, 0.15, 0.13, 0.13, 0.7, 0.7], [0.07, 0.05, 0.05, 0.05, 0.7, 0.5], [0.2, 0.15, 0.13, 0.13, 0.7, 0.7], [0.2, 0.15, 0.13, 0.13, 0.7, 0.7], [0.2, 0.15, 0.13, 0.13, 0.7, 0.7]]) self.event3_color_check[self.video.det_pts_[ self.plan_point].color] = 1 self.plan_point += 1 self._timer_waypoint.start(self.check_time) self.rex.plan_status = 1 elif 
self.plan_point == len( self.video.det_pts_) and self.rex.plan_status == 0: print "event3 finish once" # self._timer_event.stop() self.rex.plan = [] self.vel = [] self.plan_point = 0 self.video.det_pts_ = [] else: pass def event4_timer(self): color_status = 0 if self.rex.moving_status == 1: self._timer_event4.stop() self.rex.plan = [] self._timer_waypoint.stop() if len(self.video.det_pts_) == 0: print "begin to detect" self.video.blockDetector() points_temp = [] if self.event4_color_idx == 8: print "event finishes" self._timer_event4.stop() self.rex.plan = [] self.vel = [] return for points_i in self.video.det_pts_: if points_i.color == RE_COLOR[self.event4_color_idx]: points_temp.append(points_i) self.event4_color_idx += 1 color_status = 1 break if color_status == 0: for points_i in self.video.det_pts_: if points_i.depth > 50 and points_i.rgb_real_pos[0] < 0: points_temp.append(points_i) break self.video.det_pts_ = points_temp self.plan_point = 0 # self._timer_event.stop() if self.rex.plan_status == 0 and self.plan_point < len( self.video.det_pts_): x = self.video.det_pts_[self.plan_point].rgb_real_pos[0] y = self.video.det_pts_[self.plan_point].rgb_real_pos[1] print "grapping %d %d %d" % (self.plan_point, x, y) world_z = self.video.det_pts_[self.plan_point].depth rw = np.array([[0], [0], [0]]) M = self.video.aff_matrix M_RGBtoDEP = self.video.aff_matrix_RGBtoDEP v = np.array([[x], [y], [1]]) t = self.rex.rexarm_IK([x, y, world_z, np.pi / 2], 1) self.rex.plan = [[t[0], -200, -200, -200, 0, -30 * D2R], [-200, -200, t[2], t[3], 0, -200], [-200, t[1], -200, -200, 0, -200], [-200, -200, -200, -200, 0, 8 * D2R], [-200, 0, -200, -200, -200, -200], [-200, 0, 0, 0 * D2R, 0, -200]] self.vel = [[0.25, 0.2, 0.18, 0.18, 0.7, 0.7]] * len(self.rex.plan) color_idx = self.video.det_pts_[self.plan_point].color if color_status == 1: t = self.rex.rexarm_IK([ EVENT4_X, EVENT4_Y, (self.event4_color_idx - 1) * 38 + 24, np.pi / 2 ], 1) else: t = self.rex.rexarm_IK([ EVENT4_XF[self.event4_f_idx], EVENT4_YF[self.event4_f_idx], 24, np.pi / 2 ], 1) self.event4_f_idx += 1 self.rex.plan.extend([[t[0], -200, -200, -200, 0, -200], [-200, -200, t[2], t[3], 0, -200], [-200, t[1], -200, -200, 0, -200], [-200, -200, -200, -200, 0, -20 * D2R], [-200, 0, t[2], -200, -200, -200], [-200, 0, 0, 0 * D2R, 0, -200]]) self.vel.extend([[0.25, 0.2, 0.18, 0.18, 0.7, 0.7]] * 6) self.plan_point += 1 self._timer_waypoint.start(self.check_time) self.rex.plan_status = 1 elif self.plan_point == len( self.video.det_pts_) and self.rex.plan_status == 0: print "event4 finish once" # self._timer_event.stop() self.rex.plan = [] self.vel = [] self.plan_point = 0 self.video.det_pts_ = [] else: pass def event5_timer(self): if self.rex.moving_status == 1: self._timer_event5.stop() self.rex.plan = [] self._timer_waypoint.stop() if len(self.video.det_pts_) == 0: if self.event5_block_num == 29: print "event finishes" self._timer_event5.stop() self.rex.plan = [] self.vel = [] return print "begin to detect" self.video.blockDetector() points_temp = [] points_max = 0 for points_i in self.video.det_pts_: if points_i.rgb_real_pos[0] > 0 and abs( np.arctan2(points_i.rgb_real_pos[1], points_i.rgb_real_pos[0])) > points_max: points_temp.append(points_i) points_max = abs( np.arctan2(points_i.rgb_real_pos[1], points_i.rgb_real_pos[0])) # print points_i.depth self.video.det_pts_ = [points_temp[-1]] self.plan_point = 0 if self.rex.plan_status == 0 and self.plan_point < len( self.video.det_pts_): x = self.video.det_pts_[self.plan_point].rgb_real_pos[0] y = 
self.video.det_pts_[self.plan_point].rgb_real_pos[1] print "grapping %d %d %d" % (self.plan_point, x, y) world_z = self.video.det_pts_[self.plan_point].depth rw = np.array([[0], [0], [0]]) M = self.video.aff_matrix M_RGBtoDEP = self.video.aff_matrix_RGBtoDEP v = np.array([[x], [y], [1]]) t = self.rex.rexarm_IK([x, y, world_z, np.pi / 2], 1) if x > 0 and y > 0: t[0] -= 2 * D2R self.rex.plan = [[t[0], -200, -200, -200, 0, -30 * D2R], [-200, -200, t[2], t[3], 0, -200], [-200, t[1], -200, -200, 0, -200], [-200, -200, -200, -200, 0, 8 * D2R], [-200, 0, -200, -200, -200, -200], [-200, 0, 0, 0 * D2R, 0, -200]] self.vel = [[0.3, 0.2, 0.18, 0.18, 0.7, 0.7]] * len(self.rex.plan) color_idx = self.video.det_pts_[self.plan_point].color if self.event5_block_num < 28: t = self.rex.rexarm_IK([ EVENT5_X[self.event5_block_num], EVENT5_Y[self.event5_block_num], 29 + EVENT5_Z[self.event5_block_num] * 38, np.pi / 2 ], 1) else: t = self.rex.rexarm_IK([ EVENT5_X[27], EVENT5_Y[27], 29 + EVENT5_Z[27] * 38 + 38, np.pi / 2 ], 1) self.rex.plan.extend([[t[0], -200, -200, -200, 0, -200], [-200, -200, t[2], t[3], 0, -200], [-200, t[1], -200, -200, 0, -200], [-200, -200, -200, -200, 0, -20 * D2R], [-200, 0, t[2] + 5 * D2R, -200, -200, -200], [-200, 0, 0, 0 * D2R, 0, -200]]) self.vel.extend([[0.285, 0.2, 0.18, 0.18, 0.7, 0.7]] * 6) self.plan_point += 1 self.event5_block_num += 1 self._timer_waypoint.start(self.check_time) self.rex.plan_status = 1 elif self.plan_point == len( self.video.det_pts_) and self.rex.plan_status == 0: print "event5 finish once" # self._timer_event.stop() self.rex.plan = [] self.vel = [] self.plan_point = 0 self.video.det_pts_ = [] else: pass def estop(self): self.statemachine.estop(self.rex) def update_gui(self): """ update_gui function Continuously called by timer1 """ """ Renders the video frames Displays a dummy image if no video available HINT: you can use the radio buttons to select different video sources like is done below """ if (self.video.kinectConnected == 1): try: self.video.captureVideoFrame() self.video.captureDepthFrame() except TypeError: self.video.kinectConnected = 0 self.video.loadVideoFrame() self.video.loadDepthFrame() if (self.ui.radioVideo.isChecked()): self.ui.videoFrame.setPixmap(self.video.convertFrame()) # x = QtGui.QWidget.mapFromGlobal(self,QtGui.QCursor.pos()).x() # y = QtGui.QWidget.mapFromGlobal(self,QtGui.QCursor.pos()).y() # if ((x > MIN_X) and (x < MAX_X) and (y > MIN_Y) and (y < MAX_Y)): # img = self.video.currentVideoFrame # hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HSV) # y -= 30 # x -= 310 # h = hsv[y][x][0] # s = hsv[y][x][1] # v = hsv[y][x][2] # global hp, xp, yp # if ((xp - x)*(xp - x) > 4 and (yp - y)*(yp - y) > 4) or (hp - h)*(hp - h) >= 1: # print "x: %d y: %d hsv : (%d, %d, %d)" % (x,y,h,s,v) # xp = x # yp = y # hp = h if (self.ui.radioDepth.isChecked()): self.ui.videoFrame.setPixmap(self.video.convertDepthFrame()) """ Update GUI Joint Coordinates Labels """ self.rex.rexarm_FK() self.ui.rdoutBaseJC.setText( str("%.2f" % (self.rex.joint_angles_fb[0] * R2D))) self.ui.rdoutShoulderJC.setText( str("%.2f" % (self.rex.joint_angles_fb[1] * R2D))) self.ui.rdoutElbowJC.setText( str("%.2f" % (self.rex.joint_angles_fb[2] * R2D))) self.ui.rdoutWristJC.setText( str("%.2f" % (self.rex.joint_angles_fb[3] * R2D))) self.ui.rdoutX.setText(str("%.2f" % (self.rex.endeffector_global[0]))) self.ui.rdoutY.setText(str("%.2f" % (self.rex.endeffector_global[1]))) self.ui.rdoutZ.setText(str("%.2f" % (self.rex.endeffector_global[2]))) self.ui.rdoutT.setText(str("%.2f" % 
(self.rex.endeffector_global[3]))) """ Mouse position presentation in GUI TODO: after implementing workspace calibration display the world coordinates the mouse points to in the RGB video image. """ x = QtGui.QWidget.mapFromGlobal(self, QtGui.QCursor.pos()).x() y = QtGui.QWidget.mapFromGlobal(self, QtGui.QCursor.pos()).y() if ((x < MIN_X) or (x > MAX_X) or (y < MIN_Y) or (y > MAX_Y)): self.ui.rdoutMousePixels.setText("(-,-,-)") self.ui.rdoutMouseWorld.setText("(-,-,-)") else: x = x - MIN_X y = y - MIN_Y z = 0. world_z = 0. rw = np.array([[0], [0], [0]]) self.ui.rdoutMousePixels.setText("(%.0f,%.0f,%f)" % (x, y, z)) if (self.video.cal_flag == 2): M = self.video.aff_matrix M_RGBtoDEP = self.video.aff_matrix_RGBtoDEP v = np.array([[x], [y], [1]]) DEP_xy = np.dot(M_RGBtoDEP, v) if ((DEP_xy[0] >= 0) and (DEP_xy[0] <= 640) and (DEP_xy[1] >= 0) and (DEP_xy[1] <= 480)): world_z = self.video.depth - 123.6 * np.tan( self.video.currentDepthFrame[int(DEP_xy[1])][int( DEP_xy[0])] / 2842.5 + 1.1863) z = self.video.currentDepthFrame[int(DEP_xy[1])][int( DEP_xy[0])] rw = np.dot( np.linalg.inv(self.video.WORLDtoRGB[:, [0, 1, 3]]), np.subtract(v, self.video.WORLDtoRGB[:, 2:3] * (-world_z))) else: z = 0.0 if rw[2] != 0: self.ui.rdoutMouseWorld.setText( "(%.0f,%.0f, %.0f)" % (rw[0] / rw[2], rw[1] / rw[2], world_z)) else: self.ui.rdoutMouseWorld.setText("(-,-,-)") else: self.ui.rdoutMouseWorld.setText("(-,-,-)") """ Updates status label when rexarm playback is been executed. This can be extended to include other appropriate messages """ if (self.rex.plan_status == 1): self.ui.rdoutStatus.setText("Playing Back - Waypoint %d" % (self.rex.wpt_number + 1)) def sliderChange(self): """ Function to change the slider labels when sliders are moved and to command the arm to the given position """ self.ui.rdoutBase.setText(str(self.ui.sldrBase.value())) self.ui.rdoutShoulder.setText(str(self.ui.sldrShoulder.value())) self.ui.rdoutElbow.setText(str(self.ui.sldrElbow.value())) self.ui.rdoutWrist.setText(str(self.ui.sldrWrist.value())) # self.ui.rdoutGrip1.setText(str(self.ui.sldrGrip1.value())) # self.ui.rdoutGrip2.setText(str(self.ui.sldrGrip2.value())) self.ui.rdoutTorq.setText(str(self.ui.sldrMaxTorque.value()) + "%") self.ui.rdoutSpeed.setText(str(self.ui.sldrSpeed.value()) + "%") self.rex.torque_multiplier = self.ui.sldrMaxTorque.value() / 100.0 self.rex.speed_multiplier = self.ui.sldrSpeed.value() / 100.0 self.rex.joint_angles[0] = self.ui.sldrBase.value() * D2R self.rex.joint_angles[1] = self.ui.sldrShoulder.value() * D2R self.rex.joint_angles[2] = self.ui.sldrElbow.value() * D2R self.rex.joint_angles[3] = self.ui.sldrWrist.value() * D2R self.rex.joint_angles[4] = self.ui.sldrGrip1.value() * D2R self.rex.joint_angles[5] = self.ui.sldrGrip2.value() * D2R # self.rex.joint_angles[6] = self.ui.sldrGrip2.value()*D2R self.rex.cmd_publish() def mousePressEvent(self, QMouseEvent): """ Function used to record mouse click positions for calibration """ """ Get mouse posiiton """ x = QMouseEvent.x() y = QMouseEvent.y() """ If mouse position is not over the camera image ignore """ if ((x < MIN_X) or (x > MAX_X) or (y < MIN_Y) or (y > MAX_Y)): return """ Change coordinates to image axis """ self.last_click[0] = x - MIN_X self.last_click[1] = y - MIN_Y """ If calibration has been performed """ if (self.video.cal_flag == 1): """ Save last mouse coordinate """ self.video.mouse_coord[self.video.mouse_click_id] = [(x - MIN_X), (y - MIN_Y)] """ Update the number of used poitns for calibration """ self.video.mouse_click_id += 1 """ 
Update status label text """
            self.ui.rdoutStatus.setText("Affine Calibration: Click Point %d" %
                                        (self.video.mouse_click_id + 1))

            """
            If the number of clicks equals the expected number of points,
            compute the affine calibration.
            LAB TASK: Change this code to use your workspace calibration
            routine and NOT the simple calibration function as is done now.
            """
            if (self.video.mouse_click_id == 2 * self.video.cal_points):
                """ Update status of calibration flag and number of mouse clicks """
                self.video.cal_flag = 2
                self.video.mouse_click_id = 0

                """ Perform affine calibration with OpenCV """
                self.video.aff_matrix = cv2.getAffineTransform(
                    self.video.mouse_coord[0:3], self.video.real_coord[0:3])
                self.video.aff_matrix_RGBtoDEP = cv2.getAffineTransform(
                    self.video.mouse_coord[0:3], self.video.mouse_coord[5:8])
                self.video.aff_matrix_DEPtoRGB = cv2.getAffineTransform(
                    self.video.mouse_coord[5:8], self.video.mouse_coord[0:3])
                object_points = np.insert(self.video.real_coord, 2,
                                          [0, -38, -76, -76, -38], axis=1)
                image_points = np.array(
                    self.video.mouse_coord[0:self.video.cal_points])
                retval, rvec, tvec = cv2.solvePnP(object_points, image_points,
                                                  self.video.intrinsic,
                                                  self.video.distortion,
                                                  cv2.SOLVEPNP_EPNP)
                self.video.WORLDtoRGB = np.dot(
                    self.video.intrinsic,
                    np.insert(cv2.Rodrigues(rvec)[0], [3], tvec, axis=1))
                t = np.dot(self.video.WORLDtoRGB,
                           np.array([[-100.0], [100.0], [-114.], [1.]]))
                self.video.WORLDtoRGB /= t[2]
                # print t[2]

                """ Average the depth readings at the five depth calibration points """
                z1_temp = np.tan(self.video.currentDepthFrame[int(
                    self.video.mouse_coord[5][1])][int(
                        self.video.mouse_coord[5][0])] / 2842.5 + 1.1863)
                z2_temp = np.tan(self.video.currentDepthFrame[int(
                    self.video.mouse_coord[6][1])][int(
                        self.video.mouse_coord[6][0])] / 2842.5 + 1.1863)
                z3_temp = np.tan(self.video.currentDepthFrame[int(
                    self.video.mouse_coord[7][1])][int(
                        self.video.mouse_coord[7][0])] / 2842.5 + 1.1863)
                z4_temp = np.tan(self.video.currentDepthFrame[int(
                    self.video.mouse_coord[8][1])][int(
                        self.video.mouse_coord[8][0])] / 2842.5 + 1.1863)
                z5_temp = np.tan(self.video.currentDepthFrame[int(
                    self.video.mouse_coord[9][1])][int(
                        self.video.mouse_coord[9][0])] / 2842.5 + 1.1863)
                # print z1_temp
                # print z2_temp
                # print z3_temp
                # print z4_temp
                # self.video.depth = 123.6 * (z1_temp + z2_temp) / 2
                self.video.depth = 123.6 * (z1_temp + z2_temp + z3_temp +
                                            z4_temp + z5_temp) / 5 + 45.5

                """ Updates Status Label to inform calibration is done """
                self.ui.rdoutStatus.setText("Waiting for input")

    def simple_cal(self):
        """
        Function called when the affine calibration button is clicked.
        Note it only changes the flag to record the next mouse clicks
        and updates the status text label.
        """
        self.video.cal_flag = 1
        self.ui.rdoutStatus.setText("Affine Calibration: Click Point %d" %
                                    (self.video.mouse_click_id + 1))
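# The same two transforms recur in click_grab(), click_put(), update_gui() and
# mousePressEvent() above: (1) an affine map registering an RGB pixel to the
# depth image, and (2) the common Kinect approximation
#     distance_mm ~= 123.6 * tan(raw / 2842.5 + 1.1863)
# that turns a raw depth reading into a distance, from which the height above
# the board is (board plane depth) - distance.  The sketch below is a minimal
# standalone illustration of that pipeline, not the project's code:
# raw_depth_to_mm(), rgb_to_board_height(), aff_rgb_to_depth and
# board_plane_depth_mm are hypothetical stand-ins.

import numpy as np


def raw_depth_to_mm(raw):
    """Approximate Kinect raw depth reading -> distance in millimetres."""
    return 123.6 * np.tan(raw / 2842.5 + 1.1863)


def rgb_to_board_height(x_rgb, y_rgb, depth_frame, aff_rgb_to_depth,
                        board_plane_depth_mm):
    """Look up the depth pixel under an RGB click via the 2x3 affine matrix
    (as returned by cv2.getAffineTransform) and return the height of the
    surface above the board plane in mm, or None if the click maps outside
    the 640x480 depth image."""
    u, v = np.dot(aff_rgb_to_depth, np.array([x_rgb, y_rgb, 1.0]))
    if not (0 <= u < 640 and 0 <= v < 480):
        return None
    raw = depth_frame[int(v)][int(u)]
    return board_plane_depth_mm - raw_depth_to_mm(raw)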