Ejemplo n.º 1
0
 def __init__(self, src):
     """Build the hand-pose classification pipeline for data source *src*.

     All components are project classes; presumably Kinect() opens the
     sensor on construction -- TODO confirm side effects.
     """
     self._src = src
     self._kinect = Kinect()
     self._body = BodyDetector()
     self._hand = HandDetector()
     self._contour = HandContourDetector()
     self._pose = PoseClassifier(MultiPoseClassifier(src))
Ejemplo n.º 2
0
 def __init__(self, id, nsamples, dst):
     """Set up Gabor-feature collection for pose label *id*.

     *nsamples* samples are gathered and written to path *dst*.
     (`id` shadows the builtin; kept for interface compatibility.)
     """
     self._id       = id
     self._nsamples = nsamples
     self._dst      = dst
     self._kinect   = Kinect()
     self._body     = BodyDetector()
     self._hand     = HandDetector()
     self._contour  = HandContourDetector()
     self._feature  = GaborDescriptors(4, 4)
Ejemplo n.º 3
0
 def __init__(self, id, nsamples, dst):
     """Set up Fourier-descriptor training for pose label *id*.

     *nsamples* contours are gathered and the result pickled to *dst*.
     (`id` shadows the builtin; kept for interface compatibility.)
     """
     self._id = id
     self._nsamples = nsamples
     self._dst = dst
     self._kinect = Kinect()
     self._body = BodyDetector()
     self._hand = HandDetector()
     self._contour = HandContourDetector()
     self._fdesc = FourierDescriptors()
     self._train = []  # collected descriptor vectors
Ejemplo n.º 4
0
 def __init__(self):
     """Register an experimental xAAL device plus the fall-scenario helpers."""
     self.addr = tools.get_random_uuid()
     self.engine = Engine()
     self.dev = Device("cli.experimental", self.addr)
     self.dev.vendor_id = 'IHSEV'
     self.dev.info = 'chute'
     self.engine.add_device(self.dev)
     self.threadlist = []
     # presumably set by a detector thread when a fall is seen -- confirm
     self.fallEvent = threading.Event()
     self.pepper = Pepper(self)
     self.kinect = Kinect()
     self.sensfloor = Sensfloor(self.engine)
Ejemplo n.º 5
0
 def __init__(self):
     """Open the Kinect and keep retrying the socket server until it is up."""
     self.kinect = Kinect(SEGMENT_SIZE, ENGAGE_MIN, ENGAGE_MAX)
     socket_started = False
     self.socket_api = None
     while not socket_started:
         try:
             self.socket_api = SocketAPI(TCP_IP, TCP_PORT, BUFFER_SIZE,
                                         ACK_TIMEOUT, TERMINATOR)
             socket_started = True
         except Exception:
             # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
             # can still abort the retry loop.
             print("Socket didn't start, waiting 3 seconds...")
             time.sleep(3)
     print("Connected!")
     self.HandModel = RealTimeHandRecognition("RH", 32, 2)
Ejemplo n.º 6
0
class TestHand():
    """Interactive hand-detection demo.

    Streams Kinect frames, segments the hand, refines it with the palm
    detector, and shows every intermediate image.
    """

    def __init__(self):
        self._kinect = Kinect()
        self._body = BodyDetector()
        self._hand = HandDetector(HandOtsu())
        self._contour = HandContourDetector()
        self._palm = PalmDetector()

    def run(self):
        """Main display loop; iterates Kinect frames until the stream ends."""
        for (depth, depth8, rgb) in self._kinect.get_data():

            cv2.imshow('rgb', cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB))
            cv2.imshow('depth', depth8)

            hand, mask = self._hand.run(depth, depth8)
            (_, _, crop) = self._contour.run(mask)

            # `is None` instead of `== None`: crop may be a numpy array,
            # where `==` broadcasts element-wise instead of testing identity.
            if crop is None:
                continue

            cv2.imshow('hand', crop)

            hand = self._palm.run(hand, crop)
            if hand is None:
                continue

            cv2.imshow('hand final', hand)

            cv2.waitKey(10)

    def _crop(self, img, box):
        """Return the sub-image of *img* delimited by box = (x, y, w, h)."""
        crop = img[box[1]:box[1] + box[3], box[0]:box[0] + box[2]]
        return crop
Ejemplo n.º 7
0
class TestPose():
    """Live pose-classification demo (Python 2: uses `print` statements)."""

    def __init__(self, src):
        """*src* selects the trained multi-pose classifier data."""
        self._src = src
        self._kinect = Kinect()
        self._body = BodyDetector()
        self._hand = HandDetector()
        self._contour = HandContourDetector()
        self._pose = PoseClassifier(MultiPoseClassifier(src))

    def run(self):
        """Classify and print the hand pose for every Kinect frame."""
        for (depth, depth8, rgb) in self._kinect.get_data():
            contour = self._get_hand_contour(depth8, depth, rgb)

            if contour.any():
                self._contour.draw()
                print self._pose.run(contour)

            cv2.waitKey(5)

    def _get_hand_contour(self, depth8, depth, rgb):
        """Return the refined hand contour, or an empty array on failure."""
        body = self._body.run(depth8)
        (hand, _) = self._hand.run(body)
        (cont, box, hc) = self._contour.run(hand)

        if self._contour.not_valid():
            return np.array([])

        # Second pass refines the contour against the RGB frame.
        (cont, _, _) = self._contour.run(rgb, True, box, hc, depth)

        return cont
Ejemplo n.º 8
0
class TestPose():
    """Live pose-classification demo (Python 2 variant with aligned init)."""

    def __init__(self, src):
        """*src* selects the trained multi-pose classifier data."""
        self._src     = src
        self._kinect  = Kinect()
        self._body    = BodyDetector()
        self._hand    = HandDetector()
        self._contour = HandContourDetector()
        self._pose    = PoseClassifier(MultiPoseClassifier(src))

    def run(self):
        """Classify and print the hand pose for every Kinect frame."""
        for (depth, depth8, rgb) in self._kinect.get_data():
            contour = self._get_hand_contour(depth8, depth, rgb)

            if contour.any():
                self._contour.draw()
                print self._pose.run(contour)

            cv2.waitKey(5)

    def _get_hand_contour(self, depth8, depth, rgb):
        """Return the refined hand contour, or an empty array on failure."""
        body            = self._body.run(depth8)
        (hand, _)       = self._hand.run(body)
        (cont, box, hc) = self._contour.run(hand)

        if self._contour.not_valid():
            return np.array([])

        # Second pass refines the contour against the RGB frame.
        (cont, _, _) = self._contour.run(rgb, True, box, hc, depth)

        return cont
Ejemplo n.º 9
0
 def __init__(self, src):
     """Build the pose pipeline for data source *src* (aligned variant)."""
     self._src     = src
     self._kinect  = Kinect()
     self._body    = BodyDetector()
     self._hand    = HandDetector()
     self._contour = HandContourDetector()
     self._pose    = PoseClassifier(MultiPoseClassifier(src))
Ejemplo n.º 10
0
class TestHand():
    """Interactive hand-detection demo (aligned-init variant).

    Streams Kinect frames, segments the hand, refines it with the palm
    detector, and shows every intermediate image.
    """

    def __init__(self):
        self._kinect  = Kinect()
        self._body    = BodyDetector()
        self._hand    = HandDetector(HandOtsu())
        self._contour = HandContourDetector()
        self._palm    = PalmDetector()

    def run(self):
        """Main display loop; iterates Kinect frames until the stream ends."""
        for (depth, depth8, rgb) in self._kinect.get_data():

            cv2.imshow('rgb', cv2.cvtColor(rgb, cv2.COLOR_BGR2RGB))
            cv2.imshow('depth', depth8)

            hand, mask = self._hand.run(depth, depth8)
            (_, _, crop) = self._contour.run(mask)

            # `is None` instead of `== None`: crop may be a numpy array,
            # where `==` broadcasts element-wise instead of testing identity.
            if crop is None:
                continue

            cv2.imshow('hand', crop)

            hand = self._palm.run(hand, crop)
            if hand is None:
                continue

            cv2.imshow('hand final', hand)

            cv2.waitKey(10)

    def _crop(self, img, box):
        """Return the sub-image of *img* delimited by box = (x, y, w, h)."""
        crop = img[box[1]:box[1]+box[3], box[0]:box[0]+box[2]]
        return crop
Ejemplo n.º 11
0
    def __init__(self):
        """Initialize the pick-up environment.

        Builds the observation space (Kinect depth images, optionally
        flattened to 1-D), the 2-D (r, phi) action space, and the simulated
        box / kinect / gripper objects. Relies on configuration attributes
        (self.flattenImage, self.image*, self.gripper*, self.box*) being
        set before this runs -- presumably by a subclass or mixin; confirm.
        """

        # the state space (=observation space) are all possible depth images of the kinect camera
        if (self.flattenImage):
            self.observation_space = spaces.Box(
                low=0,
                high=255,
                shape=[self.imageHeight * self.imageWidth],
                dtype=np.uint8)
        else:
            self.observation_space = spaces.Box(
                low=0,
                high=255,
                shape=[self.imageHeight, self.imageWidth],
                dtype=np.uint8)

        # action = polar gripper placement: radius in [rMin, rMax], angle in [0, pi]
        boundaries_r = [self.gripperRadiusMin, self.gripperRadiusMax]
        boundaries_phi = [0, np.pi]

        low = np.array([boundaries_r[0], boundaries_phi[0]])
        high = np.array([boundaries_r[1], boundaries_phi[1]])
        self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)

        self.reward_range = (-np.inf, np.inf)

        self.seed()

        # create object for box (object to pick up)
        self.box = ObjectToPickUp(length=self.boxLength,
                                  width=self.boxWidth,
                                  height=self.boxHeight,
                                  gripperRadius=self.gripperRadiusMax)
        # create object for kinect
        self.kinect = Kinect(self.imageWidth,
                             self.imageHeight,
                             x=0.0,
                             y=0.0,
                             z=1.0)
        # create object for gripper
        self.gripper = Gripper(self.gripperDistance,
                               self.gripperWidth,
                               self.gripperHeight,
                               r=0.0,
                               phi=0.0)
Ejemplo n.º 12
0
    def __init__(self):
        """Initialize the main window: Kinect logic, internal state, UI, events."""
        QtGui.QMainWindow.__init__(self)
        # Internal state
        self.__kinect = Kinect()  # This contains all the logic
        self.__objectData = None  # Keep this to avoid refresh issues on the image
        self.__exportDirectory = os.path.expanduser('~')  # Save directory
        # Initialize the UI
        self.__mainWidget = None
        self.__previewImage = None
        self.__objectImage = None
        self.__captureButton = None
        self.__exportButton = None
        self.__initUI()
        # Events
        self.__captureButton.clicked.connect(self.__onCapture)
        self.__exportButton.clicked.connect(self.__onExport)
Ejemplo n.º 13
0
 def __init__(self, id, nsamples, dst):
     """Set up Fourier-descriptor training (aligned variant).

     *nsamples* contours for pose label *id* are collected and pickled
     to path *dst*.
     """
     self._id       = id
     self._nsamples = nsamples
     self._dst      = dst
     self._kinect   = Kinect()
     self._body     = BodyDetector()
     self._hand     = HandDetector()
     self._contour  = HandContourDetector()
     self._fdesc    = FourierDescriptors()
     self._train    = []  # collected descriptor vectors
Ejemplo n.º 14
0
class TrainPose():
    """Collect Fourier-descriptor samples of a hand pose and fit a model.

    Press the GO key to leave the warm-up phase; once `nsamples` contours
    have been captured, a covariance model is pickled to `dst`.
    """

    def __init__(self, id, nsamples, dst):
        self._id       = id              # pose label stored with the data
        self._nsamples = nsamples        # samples to collect before saving
        self._dst      = dst             # output pickle path
        self._kinect   = Kinect()
        self._body     = BodyDetector()
        self._hand     = HandDetector()
        self._contour  = HandContourDetector()
        self._fdesc    = FourierDescriptors()
        self._train    = []              # collected descriptor vectors

    def run(self):
        """Main capture loop; returns after the model has been saved."""
        warmup = True
        for (depth8, depth, rgb) in self._kinect.get_data():
            contour = self._get_hand_contour(depth8, depth, rgb)
            if not contour:
                continue

            self._contour.draw()

            # Warm-up: show contours but do not record until GO is pressed.
            if warmup:
                key = cv2.waitKey(5)
                if key == GO:
                    warmup = False
                continue

            fd = self._fdesc.run(contour)
            self._train.append(fd)

            if len(self._train) == self._nsamples:
                self._save()
                break

            cv2.waitKey(5)

    def _get_hand_contour(self, depth8, depth, rgb):
        """Return the refined hand contour, or [] when detection fails."""
        body = self._body.run(depth8)
        (hand, _) = self._hand.run(body)
        (cont, box, hc) = self._contour.run(hand)

        if self._contour.not_valid():
            return []

        (cont, _, _) = self._contour.run(rgb, True, box, hc, depth)

        return cont

    def _save(self):
        """Fit a covariance model on the samples and pickle data + model."""
        data = np.array(self._train)
        # Reuse `data` instead of rebuilding the array, and close the output
        # file deterministically (the old code leaked the open file handle).
        model = EmpiricalCovariance().fit(data)
        output = {'id': self._id, 'data': data, 'model': model}
        with open(self._dst, 'wb') as f:
            pickle.dump(output, f)
Ejemplo n.º 15
0
class TrainPose():
    """Fourier-descriptor pose trainer (unaligned-init variant).

    Captures `nsamples` hand contours after the GO key is pressed, fits a
    covariance model, and pickles the result to `dst`.
    """

    def __init__(self, id, nsamples, dst):
        self._id = id                    # pose label stored with the data
        self._nsamples = nsamples        # samples to collect before saving
        self._dst = dst                  # output pickle path
        self._kinect = Kinect()
        self._body = BodyDetector()
        self._hand = HandDetector()
        self._contour = HandContourDetector()
        self._fdesc = FourierDescriptors()
        self._train = []                 # collected descriptor vectors

    def run(self):
        """Main capture loop; returns after the model has been saved."""
        warmup = True
        for (depth8, depth, rgb) in self._kinect.get_data():
            contour = self._get_hand_contour(depth8, depth, rgb)
            if not contour:
                continue

            self._contour.draw()

            # Warm-up: show contours but do not record until GO is pressed.
            if warmup:
                key = cv2.waitKey(5)
                if key == GO:
                    warmup = False
                continue

            fd = self._fdesc.run(contour)
            self._train.append(fd)

            if len(self._train) == self._nsamples:
                self._save()
                break

            cv2.waitKey(5)

    def _get_hand_contour(self, depth8, depth, rgb):
        """Return the refined hand contour, or [] when detection fails."""
        body = self._body.run(depth8)
        (hand, _) = self._hand.run(body)
        (cont, box, hc) = self._contour.run(hand)

        if self._contour.not_valid():
            return []

        (cont, _, _) = self._contour.run(rgb, True, box, hc, depth)

        return cont

    def _save(self):
        """Fit a covariance model on the samples and pickle data + model."""
        data = np.array(self._train)
        # Reuse `data` instead of rebuilding the array, and close the output
        # file deterministically (the old code leaked the open file handle).
        model = EmpiricalCovariance().fit(data)
        output = {'id': self._id, 'data': data, 'model': model}
        with open(self._dst, 'wb') as f:
            pickle.dump(output, f)
Ejemplo n.º 16
0
class TrainPose():

    def __init__(self, id, nsamples, dst):
        self._id       = id
        self._nsamples = nsamples
        self._dst      = dst
        self._kinect   = Kinect()
        self._body     = BodyDetector()
        self._hand     = HandDetector()
        self._contour  = HandContourDetector()
        self._feature  = GaborDescriptors(4, 4)

    def run(self):
        warmup = True
        train = []
        model = pickle.load(open('svm.pck', 'rb'))

        for (depth, depth8, rgb) in self._kinect.get_data():
            body      = self._body.run(depth8)
            (hand, _) = self._hand.run(body)
            (cont, box, crop) = self._contour.run(hand)

            hand = hand[box[1]:box[1]+box[3], box[0]:box[0]+box[2]]

            cv2.imshow('hand', hand)
            key = cv2.waitKey(2)

            #if warmup:
            #    if key == GO:
            #        warmup = False
            #    continue

            #if key != 97:
            #    continue

            feature = self._feature.run(hand)
            print model.predict(feature)
            #train.append(feature)

            #if len(train) == self._nsamples:
            #    self._save(train)
            #    break

            #cv2.waitKey(2)

    def _save(self, train):
        data = np.array(train)
        labels = self._id * np.ones(len(train), np.int)
        output = {'labels': labels, 'data': data}
        pickle.dump(output, open(self._dst, 'wb'))
Ejemplo n.º 17
0
    def __init__(self):
        """Build the GTK window: Kinect display, game scene, control buttons,
        and a periodic refresh timer."""
        self._paused = True
        self._kinect = Kinect()

        gtk.Window.__init__(self)
        self.set_default_size(1280, 960)

        vbox = gtk.VBox()
        self.add(vbox)

        # Kinect info visualisation.
        self._display = KinectDisplay(self._kinect)
        vbox.pack_start(self._display, True, True, 0)

        hbox = gtk.HBox()
        vbox.pack_start(hbox)

        # Game scheme representation.
        game_scene = GameSceneArea(self._kinect)
        self._display.add_observer(game_scene)
        hbox.pack_start(game_scene)

        button_vbox = gtk.VBox()
        hbox.pack_start(button_vbox)

        # Choose static data.
        self.choose = gtk.Button('Open', gtk.STOCK_OPEN)
        button_vbox.pack_start(self.choose)
        self.choose.connect("clicked", self._choose_cb)

        # Save button.
        self.save = gtk.Button('Save', gtk.STOCK_SAVE)
        self.save.set_sensitive(False)
        button_vbox.pack_start(self.save)
        self.save.connect("clicked", self._save_cb)

        # Pause/Autorefresh button.
        self.pause = gtk.Button('Pause', gtk.STOCK_MEDIA_PAUSE)
        button_vbox.pack_start(self.pause)
        self.pause.connect("clicked", self._pause_cb)

        self.connect("destroy", gtk.main_quit)
        self.show_all()

        # Auto-refresh at 10 frames per seconds.
        self.timer_id = gobject.timeout_add(self.REFRESH_DELAY,
                self._timedout)
Ejemplo n.º 18
0
class DepthClient:
    """Streams Kinect hand frames through the recognizer and publishes labels."""

    def __init__(self):
        """Open the Kinect and keep retrying the socket server until it is up."""
        self.kinect = Kinect(SEGMENT_SIZE, ENGAGE_MIN, ENGAGE_MAX)
        socket_started = False
        self.socket_api = None
        while not socket_started:
            try:
                self.socket_api = SocketAPI(TCP_IP, TCP_PORT, BUFFER_SIZE,
                                            ACK_TIMEOUT, TERMINATOR)
                socket_started = True
            except Exception:
                # Narrowed from a bare `except:` so KeyboardInterrupt can
                # still abort the retry loop.
                print("Socket didn't start, waiting 3 seconds...")
                time.sleep(3)
        print("Connected!")
        self.HandModel = RealTimeHandRecognition("RH", 32, 2)

    def run(self):
        """Classify left/right hand frames forever, pushing labels upstream."""
        while True:
            frames = self.kinect.get()
            if frames is None:
                print("waiting for frames...")
                # 1.0 / 30 (not 1 / 30): under Python 2 integer division the
                # old expression evaluated to 0, i.e. a busy-wait.
                time.sleep(1.0 / 30)
                continue
            (LH_probs, LH_out), (RH_probs, RH_out) = self.HandModel.classifyLR(
                frames[0], frames[1])
            LH_idx = np.argmax(LH_out)
            LH_label = hand_postures[LH_idx]
            RH_idx = np.argmax(RH_out)
            RH_label = hand_postures[RH_idx]
            self.socket_api.set("user:hands:left", LH_label)
            self.socket_api.set("user:hands:right", RH_label)
            print(LH_label, RH_label)
Ejemplo n.º 19
0
class Controller:
    """Fall-response orchestrator: NAO robot + Kinect + xAAL home devices.

    Python 2 module (uses `print` statements and `raw_input`).
    """

    def __init__(self):
        self.nao = NAO(self)
        self.kinect = Kinect()
        self.xaalproxy = xAALProxy()

    def run(self):
        """Poll for falls while the operator answers Y; react to each fall."""
        while raw_input("continu ? (Y/N)") == "Y":
            if self.kinect.fallDetection():
                self.nao.say("detecter la chute")
                self.nao.say("detecter la chute")
                self.nao.moveToPerson()
                self.nao.verifyPersonState(5)
                # Even verify-state: presumably "person did not respond",
                # so escalate to the emergency scenario -- confirm semantics.
                if not self.nao.getVerifyState() % 2:
                    self.nao.say("Ne t'inquiete pas, je vais demander a quelqu'un pour t'aider")
                    print "verifystate ", self.nao.getVerifyState()
                    self.scenario()
                    self.nao.response(0)
                    self.nao.say("merci d'etre venu")
                else:
                    self.nao.say("tres bien")

    def waitPersonResponse(self, timeout):
        """Poll (3 s period) until a response or *timeout* polls elapse.

        Returns False on timeout, else the parity of the verify state.
        A *timeout* of 0 waits forever.
        """
        count = 0
        self.nao.resetVerifyState()
        while not self.nao.getVerifyState():
            time.sleep(3)
            count += 1
            if (timeout != 0) and (count > timeout):
                print "waitpersonresponse timeout!"
                return False
        print "getpersonresponse"
        return self.nao.getVerifyState() % 2

    def sendMail(self):
        """E-mail a WebRTC video link for the robot camera stream."""
        link = self.webRTCaddress()
        topic = "nao_robot/camera/top/camera/image_raw"
        text = "please see the video from this address " + link
        text = text + " with topic name " + topic
        mail = Mail(text)
        mail.sendMail()

    def webRTCaddress(self):
        """Return this host's wlan0 address suffixed with the :8080 port."""
        ni.ifaddresses("wlan0")
        ip = ni.ifaddresses("wlan0")[2][0]["addr"] + ":8080"
        print "HostIP : ", ip
        return ip

    def scenario(self):
        """Emergency scene: open shutters, lights on, power off, notify people."""
        self.smartDeviceAction("shutterleft", "up")
        self.smartDeviceAction("shutterright", "up")
        self.smartDeviceAction("lamp1", "on")
        self.smartDeviceAction("lamp2", "on")
        self.smartDeviceAction("lamp3", "on")
        self.nao.say("porte ouverte, volet remonte")
        self.smartDeviceAction("switch", "off")
        self.nao.say("j'ai eteint l'electricite")
        try:
            self.sendMail()
        except:
            # Best-effort: mail failure must not abort the scenario.
            print "mail send failed"
        self.nao.say("j'ai envoye le mail a vos proches")
        self.smartDeviceAction(
            "mobilephone", "inform", "msg", "J'ai detecte un probleme. Votre ami a fait un malaise. Venez l'aider."
        )
        self.nao.say("message vocal envoye")

    def smartDeviceAction(self, device, action, key=None, value=None):
        """Forward a device command to the xAAL proxy."""
        self.xaalproxy.sendmsg(device, action, key, value)
Ejemplo n.º 20
0
 def __init__(self):
     """Build the hand test pipeline (Otsu-threshold hand detector)."""
     self._kinect = Kinect()
     self._body = BodyDetector()
     self._hand = HandDetector(HandOtsu())
     self._contour = HandContourDetector()
     self._palm = PalmDetector()
#!/usr/bin/env python

from kinect import Kinect
import rospy

#cont=0
kin = Kinect()

def listener(kin):
	#rospy.init_node('listener', anonymous=True)

	# Get values
	#for i in xrange(10):
   	#print cont, kin.get_posture()
   	
   	print kin.get_posture()
   	#cont+=1

    # spin() simply keeps python from exiting until this node is stopped
  	rospy.spin()

if __name__=='__main__':
	# Init the Kinect object
	listener(kin)
Ejemplo n.º 22
0
        return (self._interval[0], self._interval[0] + i)

    def _normalize(self, x):
        xn = x / x.sum()
        return xn

    def show(self):
        """Display depth and body-mask images side by side, titled with the
        current thresholds. No-op while either image is still empty."""
        if self._body.any() and self._depth.any():
            img = np.hstack((self._depth, self._body))
            # Renamed from `str`, which shadowed the builtin.
            title = 'th1=%d th2=%d' % (self._interval[0], self._interval[1])
            cv2.imshow(title, img)


if __name__ == '__main__':
    from kinect import Kinect

    # Smoke test: stream depth frames through the body detector until the
    # Kinect wrapper reports a stop key.
    kinect = Kinect()
    body = BodyDetector()
    

    # NOTE(review): this get_data() yields 2-tuples, unlike the 3-tuple
    # variants elsewhere -- confirm against the kinect module in use.
    for (d, _) in kinect.get_data():

        b = body.run(d)

        #body.show()

        if kinect.stop(kinect.wait()):
            break

        #import sys; sys.stdin.read(1)
                        -1)

    print(cam_locs)
    cam_locs = np.concatenate([
        cam_locs[..., ::-1], depth[cam_locs[:, 0], cam_locs[:, 1]].reshape(
            -1, 1)
    ], -1)

    valid = np.logical_and(cam_locs[:, -1] > 200, cam_locs[:, -1] < 4000)

    return delta_image, cam_locs[valid], proj_locs[valid]


# Projector/Kinect calibration loop: collect 20 rounds of camera/projector
# point correspondences and preview each difference image.
w = Fullscreen_Window()
w.clear()
k = Kinect()
k.start()

# NOTE(review): `a` is never used below -- confirm before deleting.
a = 0

all_cam_locs, all_proj_locs = [], []
for i in range(20):
    im, cam_locs, proj_locs = get_me_some_correspondances(k, w)
    im += 0.5  # shift for display; presumably im is centered on 0 -- confirm

    all_cam_locs.append(cam_locs)
    all_proj_locs.append(proj_locs)

    print(cam_locs)

    plt.imshow(im)
Ejemplo n.º 24
0
import time
from kinect import Kinect, X, Y, Z
from midi_interface import MidiInterface
from utilities import sprout, scale_constrain
from events import Events

# Shared performance parameters for the Kinect-driven MIDI setup.
key = 60      # presumably the MIDI tonal center (60 = middle C) -- confirm
beat = 0.125  # presumably seconds per beat subdivision -- confirm
kinect = Kinect.retrieve()
midi = MidiInterface.retrieve()
min_piano_dur = 0.15  # presumably minimum note duration, seconds -- confirm
min_piano_up = 0.05   # presumably minimum key-release gap, seconds -- confirm
events = Events.retrieve()
Ejemplo n.º 25
0
    def __init__(self, parent=None):
        """Build the GUI: widget groups, worker objects, signal wiring, threads."""
        QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        """ Groups of ui commonents """
        self.error_lcds = [
            self.ui.rdoutBaseErrors, self.ui.rdoutShoulderErrors,
            self.ui.rdoutElbowErrors, self.ui.rdoutWristErrors,
            self.ui.rdoutWrist2Errors, self.ui.rdoutWrist3Errors
        ]
        self.joint_readouts = [
            self.ui.rdoutBaseJC, self.ui.rdoutShoulderJC, self.ui.rdoutElbowJC,
            self.ui.rdoutWristJC, self.ui.rdoutWrist2JC, self.ui.rdoutWrist3JC
        ]
        self.joint_slider_rdouts = [
            self.ui.rdoutBase, self.ui.rdoutShoulder, self.ui.rdoutElbow,
            self.ui.rdoutWrist, self.ui.rdoutWrist2, self.ui.rdoutWrist3
        ]
        self.joint_sliders = [
            self.ui.sldrBase, self.ui.sldrShoulder, self.ui.sldrElbow,
            self.ui.sldrWrist, self.ui.sldrWrist2, self.ui.sldrWrist3
        ]
        """Objects Using Other Classes"""
        self.kinect = Kinect()
        self.rexarm = Rexarm()
        self.tp = TrajectoryPlanner(self.rexarm)
        self.sm = StateMachine(self.rexarm, self.tp, self.kinect)
        self.sm.is_logging = False
        """
        Attach Functions to Buttons & Sliders
        TODO: NAME AND CONNECT BUTTONS AS NEEDED
        """
        # Video
        self.ui.videoDisplay.setMouseTracking(True)
        self.ui.videoDisplay.mouseMoveEvent = self.trackMouse
        self.ui.videoDisplay.mousePressEvent = self.calibrateMousePress
        # Buttons
        # Handy lambda function that can be used with Partial to only set the new state if the rexarm is initialized
        nxt_if_arm_init = lambda next_state: self.sm.set_next_state(
            next_state if self.rexarm.initialized else None)
        self.ui.btn_estop.clicked.connect(self.estop)
        self.ui.btn_init_arm.clicked.connect(self.initRexarm)
        self.ui.btnUser1.setText("Calibrate")
        self.ui.btnUser1.clicked.connect(partial(nxt_if_arm_init, 'calibrate'))
        ##OUR_CODE
        self.ui.btn_exec.clicked.connect(self.execute)
        self.ui.btnUser2.clicked.connect(self.record)
        self.ui.btnUser3.clicked.connect(self.playback)
        self.ui.btnUser4.clicked.connect(self.execute_tp)
        self.ui.btnUser5.clicked.connect(self.toggle_logging)
        # NOTE(review): btnUser1 is connected twice (nxt_if_arm_init above and
        # self.calibrate here); both slots fire on every click -- confirm intended.
        self.ui.btnUser1.clicked.connect(self.calibrate)
        self.ui.btnUser6.clicked.connect(self.blockDetect)
        self.ui.btnUser7.clicked.connect(self.openGripper)
        self.ui.btnUser8.clicked.connect(self.closeGripper)
        self.ui.btnUser9.clicked.connect(self.clickGrab)
        # Sliders
        for sldr in self.joint_sliders:
            sldr.valueChanged.connect(self.sliderChange)
        self.ui.sldrMaxTorque.valueChanged.connect(self.sliderChange)
        self.ui.sldrSpeed.valueChanged.connect(self.sliderChange)
        # Direct Control
        self.ui.chk_directcontrol.stateChanged.connect(self.directControlChk)
        # Status
        self.ui.rdoutStatus.setText("Waiting for input")
        # Auto exposure
        self.ui.chkAutoExposure.stateChanged.connect(self.autoExposureChk)
        """initalize manual control off"""
        self.ui.SliderFrame.setEnabled(False)
        """Setup Threads"""
        # Rexarm runs its own thread

        # Video
        self.videoThread = VideoThread(self.kinect)
        self.videoThread.updateFrame.connect(self.setImage)
        self.videoThread.start()

        # State machine
        self.logicThread = LogicThread(self.sm)
        self.logicThread.start()

        # Display
        self.displayThread = DisplayThread(self.rexarm, self.sm)
        self.displayThread.updateJointReadout.connect(self.updateJointReadout)
        self.displayThread.updateEndEffectorReadout.connect(
            self.updateEndEffectorReadout)
        self.displayThread.updateStatusMessage.connect(
            self.updateStatusMessage)
        self.displayThread.updateJointErrors.connect(self.updateJointErrors)
        self.displayThread.start()
Ejemplo n.º 26
0
from kinect import Kinect
new_kinect=Kinect()

while True:
    coords=new_kinect.update_xy()
    print str(coords)
    if coords==None:
        new_kinect.select_skeleton()
Ejemplo n.º 27
0
import time
from kinect import Kinect, X, Y, Z
from midi_interface import MidiInterface
from utilities import sprout, scale_constrain
from events import Events

# Shared performance parameters (duplicate of the setup block above).
key = 60      # presumably the MIDI tonal center (60 = middle C) -- confirm
beat = 0.125  # presumably seconds per beat subdivision -- confirm
kinect = Kinect.retrieve()
midi = MidiInterface.retrieve()
min_piano_dur = 0.15  # presumably minimum note duration, seconds -- confirm
min_piano_up = 0.05   # presumably minimum key-release gap, seconds -- confirm
events = Events.retrieve()
Ejemplo n.º 28
0
class Gui(QMainWindow):
    """ 
    Main GUI Class
    contains the main function and interfaces between 
    the GUI and functions
    """
    def __init__(self,parent=None):
        """Build the GUI: Dynamixel bus, worker objects, signal wiring,
        threads, and the mouse-tracking timer."""
        QWidget.__init__(self,parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        """ Set GUI to track mouse """
        QWidget.setMouseTracking(self,True)

        """
        Dynamixel bus
        TODO: add other motors here as needed with their correct address"""
        self.dxlbus = DXL_BUS(DEVICENAME, BAUDRATE)
        port_num = self.dxlbus.port()
        base = DXL_MX(port_num, 1)
        shld = DXL_MX(port_num, 2)
        elbw = DXL_MX(port_num, 3)
        wrst = DXL_AX(port_num, 4)
        wrst2 = DXL_AX(port_num, 5)
        wrst3 = DXL_XL(port_num, 6)
        gripper = DXL_XL(port_num, 7)

        """Objects Using Other Classes"""
        self.kinect = Kinect()
        self.rexarm = Rexarm((base,shld,elbw,wrst,wrst2,wrst3),gripper)
        self.tp = TrajectoryPlanner(self.rexarm)
        self.sm = StateMachine(self.rexarm, self.tp, self.kinect)

        """ 
        Attach Functions to Buttons & Sliders
        TODO: NAME AND CONNECT BUTTONS AS NEEDED
        """
        self.ui.btn_exec.clicked.connect(self.execute)


        self.ui.btn_estop.clicked.connect(self.estop)

        self.ui.btnUser1.setText("Calibrate")
        self.ui.btnUser1.clicked.connect(partial(self.sm.set_next_state, "calibrate"))

        self.ui.sldrBase.valueChanged.connect(self.sliderChange)
        self.ui.sldrShoulder.valueChanged.connect(self.sliderChange)
        self.ui.sldrElbow.valueChanged.connect(self.sliderChange)
        self.ui.sldrWrist.valueChanged.connect(self.sliderChange)
        self.ui.sldrWrist2.valueChanged.connect(self.sliderChange)
        self.ui.sldrWrist3.valueChanged.connect(self.sliderChange)
        #self.ui.sldrGrip1.valueChanged.connect(self.sliderChange)

        self.ui.sldrMaxTorque.valueChanged.connect(self.sliderChange)
        self.ui.sldrSpeed.valueChanged.connect(self.sliderChange)
        self.ui.chk_directcontrol.stateChanged.connect(self.directControlChk)
        self.ui.rdoutStatus.setText("Waiting for input")

        """Team10 section for buttons"""
        self.ui.btnUser2.setText("teach")
        self.ui.btnUser2.clicked.connect(partial(self.sm.set_next_state, "teach"))

        self.ui.btnUser3.setText("repeat")
        self.ui.btnUser3.clicked.connect(partial(self.sm.set_next_state, "repeat"))

        self.ui.btnUser4.setText("Set ROI")
        self.ui.btnUser4.clicked.connect(partial(self.sm.set_next_state, "set_roi"))

        self.ui.btnUser5.setText("Set Exclusion")
        self.ui.btnUser5.clicked.connect(partial(self.sm.set_next_state, "set_exclusion"))

        self.ui.btnUser6.setText("Save frames")
        self.ui.btnUser6.clicked.connect(partial(self.sm.set_next_state, "save_frames"))


        self.ui.btn_task3.clicked.connect(partial(self.sm.set_next_state, "task3"))

        self.ui.btnUser7.setText("Click & Grab")
        self.ui.btnUser7.clicked.connect(partial(self.sm.set_next_state, "ClickandGrab"))


        """initalize manual control off"""
        self.ui.SliderFrame.setEnabled(False)

        """initalize rexarm"""
        self.rexarm.initialize()

        """Setup Threads"""
        self.videoThread = VideoThread(self.kinect)
        self.videoThread.updateFrame.connect(self.setImage)
        self.videoThread.start()


        self.logicThread = LogicThread(self.sm)
        self.logicThread.start()


        self.displayThread = DisplayThread(self.rexarm, self.sm)
        self.displayThread.updateJointReadout.connect(self.updateJointReadout)
        self.displayThread.updateEndEffectorReadout.connect(self.updateEndEffectorReadout)
        self.displayThread.updateStatusMessage.connect(self.updateStatusMessage)
        self.displayThread.start()

        """ 
        Setup Timer 
        this runs the trackMouse function every 50ms
        """
        self._timer = QTimer(self)
        self._timer.timeout.connect(self.trackMouse)
        self._timer.start(50)

    """ Slots attach callback functions to signals emitted from threads"""

    @pyqtSlot(QImage, QImage, QImage, QImage)
    def setImage(self, rgb_image, depth_image, level_image, superpose_frame):
        """Show each source image whose radio button is currently checked."""
        sources = ((self.ui.radioVideo, rgb_image),
                   (self.ui.radioDepth, depth_image),
                   (self.ui.radioUsr2, level_image),
                   (self.ui.radioUsr1, superpose_frame))
        for radio, frame in sources:
            if radio.isChecked():
                self.ui.videoDisplay.setPixmap(QPixmap.fromImage(frame))

    @pyqtSlot(list)
    def updateJointReadout(self, joints):
        """Refresh the joint-angle readouts (radians -> degrees).

        The shoulder readout is offset by +90 degrees; the Wrist3 readout
        shows joints[5] when present, else "N.A.".
        """
        self.ui.rdoutBaseJC.setText("%+.2f" % (joints[0]*R2D))
        self.ui.rdoutShoulderJC.setText("%+.2f" % ((joints[1]*R2D)+90.0))
        self.ui.rdoutElbowJC.setText("%+.2f" % (joints[2]*R2D))
        self.ui.rdoutWristJC.setText("%+.2f" % (joints[3]*R2D))
        self.ui.rdoutWrist2JC.setText("%+.2f" % (joints[4]*R2D))
        # BUG fix: the old code first wrote joints[4] (a copy-paste slip)
        # into the Wrist3 readout before overwriting it below; write once.
        if(len(joints)>5):
            self.ui.rdoutWrist3JC.setText("%+.2f" % (joints[5]*R2D))
        else:
            self.ui.rdoutWrist3JC.setText("N.A.")

    @pyqtSlot(list)
    def updateEndEffectorReadout(self, pos):
        """Refresh the end-effector readouts from pos = [x, y, z, t, g, p]."""
        widgets = (self.ui.rdoutX, self.ui.rdoutY, self.ui.rdoutZ,
                   self.ui.rdoutT, self.ui.rdoutG, self.ui.rdoutP)
        for idx, widget in enumerate(widgets):
            widget.setText(str("%+.2f" % (pos[idx])))

    @pyqtSlot(str)
    def updateStatusMessage(self, msg):
        """Show `msg` (emitted by the display thread) in the status readout."""
        self.ui.rdoutStatus.setText(msg)


    """ Other callback functions attached to GUI elements"""
    def execute(self):
        """Request the state machine to enter the "execute" state.

        The arm motion itself is performed by the state machine, not here.
        (A commented-out direct set_positions() call was removed.)
        """
        self.sm.set_next_state("execute")

    def estop(self):
        """Emergency stop: latch the rexarm estop flag, then switch state.

        NOTE(review): the flag is set before the state change; ordering
        presumably matters because other threads poll rexarm.estop -- confirm.
        """
        self.rexarm.estop = True
        self.sm.set_next_state("estop")

    def sliderChange(self):
        """Mirror slider values into their labels and command the arm.

        All rexarm setters are called with update_now=False; another loop
        presumably flushes the queued commands -- confirm against Rexarm.
        (Commented-out gripper slider handling was removed.)
        """
        # Joint sliders paired with their readout labels; the same order is
        # reused below to build the commanded joint vector.
        joint_sliders = (
            (self.ui.sldrBase, self.ui.rdoutBase),
            (self.ui.sldrShoulder, self.ui.rdoutShoulder),
            (self.ui.sldrElbow, self.ui.rdoutElbow),
            (self.ui.sldrWrist, self.ui.rdoutWrist),
            (self.ui.sldrWrist2, self.ui.rdoutWrist2),
            (self.ui.sldrWrist3, self.ui.rdoutWrist3),
        )
        for slider, readout in joint_sliders:
            readout.setText(str(slider.value()))

        self.ui.rdoutTorq.setText(str(self.ui.sldrMaxTorque.value()) + "%")
        self.ui.rdoutSpeed.setText(str(self.ui.sldrSpeed.value()) + "%")
        self.rexarm.set_torque_limits(
            [self.ui.sldrMaxTorque.value() / 100.0] * self.rexarm.num_joints,
            update_now=False)
        self.rexarm.set_speeds_normalized_global(
            self.ui.sldrSpeed.value() / 100.0, update_now=False)
        joint_positions = np.array(
            [slider.value() * D2R for slider, _ in joint_sliders])
        self.rexarm.set_positions(joint_positions, update_now=False)

    def directControlChk(self, state):
        """Toggle between manual slider control and idle on checkbox change."""
        manual = state == Qt.Checked
        self.sm.set_next_state("manual" if manual else "idle")
        self.ui.SliderFrame.setEnabled(manual)

    def trackMouse(self):
        """Timer callback (every 50 ms): show pixel and world coordinates
        under the mouse.

        Outside the camera image both readouts show placeholders; inside it
        the depth frame supplies z and the kinect calibration maps the pixel
        to world coordinates.
        """
        # Map the global cursor position once instead of once per axis.
        cursor = QWidget.mapFromGlobal(self, QCursor.pos())
        x = cursor.x()
        y = cursor.y()
        if (x < MIN_X) or (x >= MAX_X) or (y < MIN_Y) or (y >= MAX_Y):
            self.ui.rdoutMousePixels.setText("(-,-,-)")
            self.ui.rdoutMouseWorld.setText("(-,-,-)")
        else:
            x -= MIN_X
            y -= MIN_Y
            if self.kinect.currentDepthFrame.any() != 0:
                z = self.kinect.currentDepthFrame[y][x]
                self.ui.rdoutMousePixels.setText("(%.0f,%.0f,%.0f)" % (x, y, z))
                cam_pt = self.kinect.ConvertImagePointToCameraFrame(np.array([x, y]))
                world_pt = self.kinect.ConvertCameraFrametoWorlFrame(cam_pt)
                self.ui.rdoutMouseWorld.setText(
                    "(%.3f,%.3f,%.3f)" % (world_pt[0], world_pt[1], world_pt[2]))

    def mousePressEvent(self, QMouseEvent):
        """ 
        Function used to record mouse click positions for calibration 
        """
 
        """ Get mouse posiiton """
        x = QMouseEvent.x()
        y = QMouseEvent.y()

        """ If mouse position is not over the camera image ignore """
        if ((x < MIN_X) or (x > MAX_X) or (y < MIN_Y) or (y > MAX_Y)): return

        """ Change coordinates to image axis """
        self.kinect.last_click[0] = x - MIN_X 
        self.kinect.last_click[1] = y - MIN_Y
        if (self.kinect.kinectCalibrated == True):
            self.kinect.PointCamera_last_click = self.kinect.ConvertImagePointToCameraFrame(np.array([x - MIN_X, y - MIN_Y]))
            self.kinect.PointWorld_last_click  = self.kinect.ConvertCameraFrametoWorlFrame(self.kinect.PointCamera_last_click)
            # Retry block detection up to 10 times (10 ms apart): SelectBlock
            # returns the zero vector when no block is found at the click, in
            # which case the raw world point computed above is kept.
            for i in range(10):
                self.kinect.BlockCenter, self.kinect.BlockOrientation_last_click  = self.kinect.SelectBlock(np.array([ self.kinect.last_click[0], self.kinect.last_click[1]]))
                if not np.array_equal(self.kinect.BlockCenter,np.array([0.0, 0.0, 0.0])):
                    self.kinect.PointWorld_last_click  = self.kinect.BlockCenter
                    #print("Coodinates: "+str(self.kinect.PointWorld_last_click))
                    #print("Orientation: "+str(self.kinect.BlockOrientation_last_click))
                    break
                time.sleep(0.010)
        # NOTE(review): these prints run even when kinectCalibrated is False,
        # in which case BlockOrientation_last_click was never assigned here --
        # confirm the Kinect class pre-initializes it, else this can raise
        # AttributeError on an uncalibrated click.
        print("Wold point selected: "+str(self.kinect.PointWorld_last_click))
        print("Orientation: " +str(self.kinect.BlockOrientation_last_click))
        # Signal consumers (state machine) that a fresh click is available.
        self.kinect.new_click = True
Ejemplo n.º 29
0
from kinect import Kinect
import time
from settings import RUN_TIME

# Drive the kinect capture loop for the configured duration.
# (Commented-out Python-2-era debug calls -- k.run(6), `print m`,
# k.execute() -- were removed as dead code.)
k = Kinect()

k.run(RUN_TIME)

Ejemplo n.º 30
0
class Gui(QMainWindow):
    """
    Main GUI Class
    contains the main function and interfaces between
    the GUI and functions
    """
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        """ Set GUI to track mouse """
        QWidget.setMouseTracking(self, True)

        """dynamixel bus -- add other motors here"""
        self.dxlbus = DXL_BUS(DEVICENAME, BAUDRATE)
        port_num = self.dxlbus.port()
        base = DXL_MX(port_num, 0)
        shld = DXL_MX(port_num, 1)
        elbw = DXL_MX(port_num, 2)
        wrst = DXL_AX(port_num, 3)
        wrst2 = DXL_XL(port_num, 4)  # NIC 10/4
        grip = DXL_XL(port_num, 5)   # NIC 10/4

        """Objects Using Other Classes"""
        self.kinect = Kinect()
        self.rexarm = Rexarm((base, shld, elbw, wrst, wrst2), grip)  # NIC 10/4
        self.tp = TrajectoryPlanner(self.rexarm)
        self.sm = StateMachine(self.rexarm, self.tp, self.kinect)

        """
        Attach Functions to Buttons & Sliders
        TODO: NAME AND CONNECT BUTTONS AS NEEDED
        """
        # NOTE(review): several connect/setText lines below were indented
        # with tabs in the original (a TabError under Python 3); normalized
        # to 8-space indents here, and one commented-out duplicate
        # btn_exec connection was removed.
        self.ui.btn_estop.clicked.connect(self.estop)
        self.ui.btn_exec.clicked.connect(partial(self.sm.set_next_state, "execute_plan"))
        self.ui.btn_task1.clicked.connect(partial(self.sm.set_next_state, "task_1"))
        self.ui.btn_task2.clicked.connect(partial(self.sm.set_next_state, "task_2"))
        self.ui.btn_task4.clicked.connect(partial(self.sm.set_next_state, "task_5"))
        self.ui.btnUser1.setText("Calibrate")
        self.ui.btnUser1.clicked.connect(partial(self.sm.set_next_state, "calibrate"))
        self.ui.btnUser2.setText("Record Waypoints")
        self.ui.btnUser2.clicked.connect(partial(self.sm.set_next_state, "record"))
        self.ui.btnUser3.setText("Play")
        self.ui.btnUser3.clicked.connect(partial(self.sm.set_next_state, "play"))
        self.ui.btnUser4.setText("Open Gripper")   # NIC 10/4
        self.ui.btnUser4.clicked.connect(self.rexarm.open_gripper)   # NIC 10/4
        self.ui.btnUser5.setText("Close Gripper")  # NIC 10/4
        self.ui.btnUser5.clicked.connect(self.rexarm.close_gripper)  # NIC 10/4
        self.ui.sldrBase.valueChanged.connect(self.sliderChange)
        self.ui.sldrShoulder.valueChanged.connect(self.sliderChange)
        self.ui.sldrElbow.valueChanged.connect(self.sliderChange)
        self.ui.sldrWrist.valueChanged.connect(self.sliderChange)
        self.ui.sldrGrip1.valueChanged.connect(self.sliderChange)
        self.ui.sldrGrip2.valueChanged.connect(self.sliderChange)
        self.ui.sldrMaxTorque.valueChanged.connect(self.sliderChange)
        self.ui.sldrSpeed.valueChanged.connect(self.sliderChange)
        self.ui.chk_directcontrol.stateChanged.connect(self.directControlChk)
        self.ui.rdoutStatus.setText("Waiting for input")

        """initalize manual control off"""
        self.ui.SliderFrame.setEnabled(False)

        """initalize rexarm"""
        self.rexarm.initialize()

        """Setup Threads"""
        self.videoThread = VideoThread(self.kinect)
        self.videoThread.updateFrame.connect(self.setImage)
        self.videoThread.start()

        self.logicThread = LogicThread(self.sm)
        self.logicThread.start()

        self.displayThread = DisplayThread(self.rexarm, self.sm)
        self.displayThread.updateJointReadout.connect(self.updateJointReadout)
        self.displayThread.updateEndEffectorReadout.connect(self.updateEndEffectorReadout)
        self.displayThread.updateStatusMessage.connect(self.updateStatusMessage)
        self.displayThread.start()

        """
        Setup Timer
        this runs the trackMouse function every 50ms
        """
        self._timer = QTimer(self)
        self._timer.timeout.connect(self.trackMouse)
        self._timer.start(50)

    """ Slots attach callback functions to signals emitted from threads"""

    @pyqtSlot(QImage, QImage)
    def setImage(self, rgb_image, depth_image):
        """Show the RGB or depth frame depending on the selected radio."""
        if (self.ui.radioVideo.isChecked()):
            self.ui.videoDisplay.setPixmap(QPixmap.fromImage(rgb_image))
        if (self.ui.radioDepth.isChecked()):
            self.ui.videoDisplay.setPixmap(QPixmap.fromImage(depth_image))

    @pyqtSlot(list)
    def updateJointReadout(self, joints):
        """Refresh joint-angle readouts (degrees) from `joints` (radians).

        The shoulder readout is offset by +90 deg to match the GUI's zero
        convention.
        """
        self.ui.rdoutBaseJC.setText(str("%+.2f" % (joints[0]*R2D)))
        self.ui.rdoutShoulderJC.setText(str("%+.2f" % ((joints[1]*R2D)+90.0)))
        self.ui.rdoutElbowJC.setText(str("%+.2f" % (joints[2]*R2D)))
        self.ui.rdoutWristJC.setText(str("%+.2f" % (joints[3]*R2D)))

    @pyqtSlot(list)
    def updateEndEffectorReadout(self, pos):
        """Display the end-effector pose readouts from `pos`."""
        self.ui.rdoutX.setText(str("%+.2f" % (pos[0])))
        self.ui.rdoutY.setText(str("%+.2f" % (pos[1])))
        self.ui.rdoutZ.setText(str("%+.2f" % (pos[2])))
        self.ui.rdoutT.setText(str("%+.2f" % (pos[3])))

    @pyqtSlot(str)
    def updateStatusMessage(self, msg):
        """Show `msg` (emitted by the display thread) in the status readout."""
        self.ui.rdoutStatus.setText(msg)

    """ Other callback functions attached to GUI elements"""

    def estop(self):
        """Emergency stop: latch the rexarm estop flag, then switch state."""
        self.rexarm.estop = True
        self.sm.set_next_state("estop")

    def sliderChange(self):
        """
        Function to change the slider labels when sliders are moved
        and to command the arm to the given position
        """
        self.ui.rdoutBase.setText(str(self.ui.sldrBase.value()))
        self.ui.rdoutShoulder.setText(str(self.ui.sldrShoulder.value()))
        self.ui.rdoutElbow.setText(str(self.ui.sldrElbow.value()))
        self.ui.rdoutWrist.setText(str(self.ui.sldrWrist.value()))
        #enter the vale for the slider  :slider rdoutGrip2 and rdoutGrip2
        self.ui.rdoutTorq.setText(str(self.ui.sldrMaxTorque.value()) + "%")
        self.ui.rdoutSpeed.setText(str(self.ui.sldrSpeed.value()) + "%")
        self.rexarm.set_torque_limits([self.ui.sldrMaxTorque.value()/100.0]*self.rexarm.num_joints, update_now = False)
        self.rexarm.set_speeds_normalized_global(self.ui.sldrSpeed.value()/100.0, update_now = False)
        joint_positions = np.array([self.ui.sldrBase.value()*D2R,
                           self.ui.sldrShoulder.value()*D2R,
                           self.ui.sldrElbow.value()*D2R,
                           self.ui.sldrWrist.value()*D2R,
                           self.ui.sldrGrip1.value()*D2R,
                           self.ui.sldrGrip2.value()*D2R])
        self.rexarm.set_positions(joint_positions, update_now = False)

    def directControlChk(self, state):
        """Enable manual slider control when the checkbox is checked."""
        if state == Qt.Checked:
            self.sm.set_next_state("manual")
            self.ui.SliderFrame.setEnabled(True)
        else:
            self.sm.set_next_state("idle")
            self.ui.SliderFrame.setEnabled(False)

    def trackMouse(self):
        """
        Mouse position presentation in GUI
        TODO: after implementing workspace calibration
        display the world coordinates the mouse points to
        in the RGB video image.
        """

        x = QWidget.mapFromGlobal(self,QCursor.pos()).x()
        y = QWidget.mapFromGlobal(self,QCursor.pos()).y()
        if ((x < MIN_X) or (x >= MAX_X) or (y < MIN_Y) or (y >= MAX_Y)):
            self.ui.rdoutMousePixels.setText("(-,-,-)")
            self.ui.rdoutMouseWorld.setText("(-,-,-)")
        else:
            x = x - MIN_X
            y = y - MIN_Y
            if(self.kinect.currentDepthFrame.any() != 0):
                z = self.kinect.currentDepthFrame[y][x]
                self.ui.rdoutMousePixels.setText("(%.0f,%.0f,%.0f)" % (x,y,z))
                # World readout only updates once the kinect is calibrated
                # (tab-indented in the original; normalized to spaces).
                if (self.kinect.kinectCalibrated):
                    zw = self.kinect.depthcalibration(z)
                    xwyw = self.kinect.pixeltoworldcoordinates(np.array([x,y,1]), z)
                    self.ui.rdoutMouseWorld.setText("(%.0f,%.0f,%.0f)" % (xwyw[0],xwyw[1],zw))

    def mousePressEvent(self, QMouseEvent):
        """
        Function used to record mouse click positions for calibration
        """

        """ Get mouse posiiton """
        x = QMouseEvent.x()
        y = QMouseEvent.y()

        """ If mouse position is not over the camera image ignore """
        if ((x < MIN_X) or (x > MAX_X) or (y < MIN_Y) or (y > MAX_Y)): return

        """ Change coordinates to image axis """
        self.kinect.last_click[0] = x - MIN_X
        self.kinect.last_click[1] = y - MIN_Y
        self.kinect.new_click = True
Ejemplo n.º 31
0
from kinect import Kinect
import cv2

k = Kinect()

cv2.namedWindow("Raw")
cv2.namedWindow("Thresh")
cv2.namedWindow("Masked")
cv2.namedWindow("Mask")
cv2.namedWindow("Delta")

# OpenCV invokes the onChange callback with the new trackbar position, so
# the no-op callback must accept one argument; the original zero-argument
# lambdas raised TypeError whenever a slider was moved.
cv2.createTrackbar("tmin", "Thresh", k.tmin, 255, lambda _pos: None)
cv2.createTrackbar("tmax", "Thresh", k.tmax, 255, lambda _pos: None)

print("Kinect test")
print("C: Calibrate mask")
print("O: Set origin")

# Main display loop: read thresholds back from the trackbars, update the
# kinect pipeline and refresh each debug window.
while 1:
    k.tmin = cv2.getTrackbarPos("tmin", "Thresh")
    k.tmax = cv2.getTrackbarPos("tmax", "Thresh")

    k.update()

    cv2.imshow("Raw", k.raw)
    cv2.imshow("Thresh", k.thresh)
    cv2.imshow("Masked", k.masked)
    cv2.imshow("Mask", k.mask)

    delta = k.masked
    cv2.circle(delta, k.origin, 8, 127, -1)
    # NOTE(review): no cv2.waitKey() is visible in this snippet, so the
    # windows would never repaint and the advertised C/O keys are not
    # handled; the key-handling tail was presumably truncated -- confirm
    # against the full source before relying on this loop.
Ejemplo n.º 32
0
 def __init__(self):
     # Build the hand-analysis pipeline stage by stage; each attribute is a
     # project-local detector class.  HandOtsu is injected into HandDetector
     # -- presumably an Otsu-threshold segmentation strategy; confirm.
     self._kinect  = Kinect()
     self._body    = BodyDetector()
     self._hand    = HandDetector(HandOtsu())
     self._contour = HandContourDetector()
     self._palm    = PalmDetector()
Ejemplo n.º 33
0
 def __init__(self):
     # Wire up the robot front-end (NAO gets a back-reference to this
     # object -- presumably for callbacks; confirm), the Kinect sensor and
     # the xAAL home-automation proxy.
     self.nao = NAO(self)
     self.kinect = Kinect()
     self.xaalproxy = xAALProxy()
Ejemplo n.º 34
0
import time
import pickle

import numpy as np  # was missing: np is used below to pad the matrix

from project_gl import Projector

width, height = 1920, 1080

# Load the camera->projector correspondence matrix and pad it with a zero
# row so it is square for the projector pipeline.
with open('direct_model/correspondances/one_matrix.pickle', 'rb') as fh:
    M = pickle.load(fh)
M = np.concatenate([M, [[0, 0, 0, 0]]], 0)

p = Projector(M,
              x_res=width,
              y_res=height,
              proj_x_res=1366,
              proj_y_res=768,
              monitor=1)

from kinect import Kinect
k = Kinect()
k.start()

# Stream color-inverted RGB frames (with depth) to the projector until
# draw_frame signals shutdown.  The original never broke out of the loop,
# which left the k.stop() below unreachable; `break` restores the intent.
while True:
    rgb, depth = k.get_current_rgbd_frame()
    if p.draw_frame(255 - rgb[..., :3], depth):
        p.stop()
        break

k.stop()
Ejemplo n.º 35
0
from maskrcnn_benchmark.config import cfg
from mask_rcnn_demo.predictor import COCODemo
import numpy as np
from kinect import Kinect

# Start the kinect capture before building the (slow-to-load) model.
k = Kinect()
k.start()

config_file = "mask_rcnn_demo/maskrcnn-benchmark/configs/caffe2/e2e_mask_rcnn_R_50_FPN_1x_caffe2.yaml"

# update the config options with the config file
cfg.merge_from_file(config_file)
# manual override some options
cfg.merge_from_list(["MODEL.DEVICE", "cuda"])

coco_demo = COCODemo(
    cfg,
    min_image_size=800,
    confidence_threshold=0.7,
)
# load image and then run prediction

# Run Mask R-CNN on each RGB frame forever.  The 'a'/'d'/'c' prints are
# presumably crude progress markers around the expensive stages -- the
# depth channel `d` is fetched but unused here.
while True:
    print('a')
    image, d = k.get_current_rgbd_frame(copy=True)
    image = image[..., :3]  # drop alpha/depth channel, keep RGB
    print('d')
    predictions = coco_demo.run_on_opencv_image(image)
    print('c')
    print(predictions.shape, predictions.dtype)
Ejemplo n.º 36
0
class Main:
  def __init__(self, simulate, conn, envFile='environment.xml'):
    self.conn = conn #Conn is the link to our QT GUI which tells it when a traj is ready
    self.simulate = simulate #boolean which dictates whether PackBot will execute a traj

    self.env = Environment()
    self.env.SetViewer('qtcoin')
 
    self.env.Load(envFile) #contains the robot and the floor
    self.robot = self.env.GetRobots()[0]
    self.robot.SetActiveDOFs(range(5))
  
    self.taskmanip = TaskManipulation(self.robot) #used for controlling the gripper
    self.basemanip = BaseManipulation(self.robot) #used for controlling EOD arm

    self.simController = RaveCreateController(self.env,'IdealController')
 
    if self.simulate:
      self.robotWrapper = RobotWrapper(self.robot, self.simController, self.simController, True)
      self.robotWrapper.swapToSimController()
      self.grasp = Graspability(self.env, self.robotWrapper, lambda:self.robot.GetDOFValues()[:5])

      self.graspSelector = GraspInterface(self.env, self.simController, self.robotWrapper, self.grasp.getTraj, self.simulate)
    else:
      # real robot so we have a video stream to start
      self.videoThread = Thread(target=showVideo)
      self.videoThread.daemon = False
      self.videoThread.start()

      #Establishing a network connection to PackBot...
      self.controller = PackBotController(self.env)
      self.controller.InitializeCommunications()
      
      print 'giving time to make establish full connection to packbot'
      
      time.sleep(0.5)

      #initializes the arm callback
      self.robotWrapper = RobotWrapper(self.robot, self.simController, self.controller)
      self.robotWrapper.swapToRealController()

      time.sleep(.5)

      #graspability optimizes grasps to approach cans straight-on
      self.grasp = Graspability(self.env, self.robotWrapper, self.robot.GetDOFValues)
      #click-to-grasp callback thread
      self.graspSelector = GraspInterface(self.env, self.controller, self.robotWrapper, self.grasp.getTraj, self.simulate)


    #Custom UI elements which color graspable objects green and enable camera+kinect field of view shading
    self.highlight = HighlightGraspable(self.env, self.grasp.getTraj)
    self.cam = HeadCamera(self.env, self.robot)
    self.kinect = Kinect(self.env, self.robot)

    #Using the kinect+PCL to detect cylindrical objects and add them to the env
    self.detect = DetectCylinder(self.env, self.robot) 
    self.cylinders = self.detect.getCylinders()
    self.rawKinect = RawKinect(self.env, self.robot)

    #Setup the run variables
    self.runLock = Lock()
    self.run = True

    # create a lock and a 20Hz thread to handle various FoV elements
    self.DrawPackBotFoV = False
    self.ViewPackBotFoV = False
    self.DrawKinectFoV = False
    self.UpdateLock = Lock()
    self.foVUpdateThread  = Thread(target=self._FoVUpdate)
    self.foVUpdateThread.daemon = False
    self.foVUpdateThread.start()

    # We want to show the voxel grid by default
    self.ShowVoxelGrid = True

    # We don't want to see the point cloud by default
    self.ShowPointCloud = False

    # Initialize the number of voxels we want to draw
    self.numVoxelY = 100
    self.numVoxelX = 200
    self.numVoxelZ = 300

    if self.graspSelector.handle is None:
      print 'Callback was not registered -- grasp selection will not work'

  def Execute(self, traj=None):
    '''
    Execute the last valid trajectory saved in self.graspSelector

    Args:
         Traj: The trajectory to be executed. By default, this is none and
         will be parsed from the graspselector function (click2grip.py)

    Returns:
         Nothing. Runs a trajectory on the robot or in simulation
        
    '''     
    # save the currect active manip
    previousManip = self.robot.GetActiveManipulator()
 
    # set the active maninipulator to the gripper while we run
    manip = self.robot.SetActiveManipulator("gripper")   
    if traj:
      result = traj
    else:
      result = self.graspSelector.getTraj()
    if result:
      if self.simulate:
        self.robotWrapper.swapToSimController()
        self.taskmanip.ReleaseFingers()
        self.robot.WaitForController(0)
        self.robot.GetController().SetPath(result)
        self.robot.WaitForController(0)
        self.taskmanip.CloseFingers()
        self.robot.WaitForController(0)
        self.robotWrapper.swapToRealController()
      else:        
        print 'Running grasp in:'
        for t in ['3','2','1']:
          print t
          time.sleep(1)
	print 'GO'
        self.controller.Play()
        self.controller.GripOpen()
        print 'Wait for grip to open'
        time.sleep(3)
        self.controller.TestTrajectorySending(result)
        self.controller.GripClose()
    else:
      print 'No valid trajectory found'
    # reset the active manipulator 
    self.robot.SetActiveManipulator(previousManip)
     
  def parseCommand(self, command):
    '''
    parseCommands receives commands sent from the QTViewer
    
    Commmands are transmitted as a list.
    '''
    # TODO: make a dictionary of the valid commands with appropriate functions 
    print 'Received command for:', command[0]
    if command[0] == 'UpdateObstacleData':
      self.UpdateObstacleData()
    elif command[0] == 'UpdateCylinderData':
      self.UpdateCylinderData()
    elif command[0] == 'Plan':
      self.PlanTrajectories()
    elif command[0] == 'Execute':
      self.Execute()
    elif command[0] == 'Quit':
      self.quit()
    elif command[0] == 'Home':
      self.home()
    elif command[0] == 'DrawPackBotFoV':
      with self.UpdateLock:
        self.DrawPackBotFoV = command[1]
    elif command[0] == 'ViewPackBotFoV':
      with self.UpdateLock:
        self.ViewPackBotFoV = command[1]
    elif command[0] == 'DrawKinectFoV':
      with self.UpdateLock:
        self.DrawKinectFoV = command[1]
    elif command[0] == 'ShowVoxel':
      self.ShowVoxelGrid = command[1]
    elif command[0] == 'ShowPointCloud':
      self.ShowPointCloud = command[1]
    elif command[0] == 'UpdateNumVoxel':
      if command[2] == 'X':
        self.numVoxelX = command[1]
      elif command[2] == 'Y':
        self.numVoxelY = command[1]
      elif command[2] == 'Z':
        self.numVoxelZ = command[1]

  def home(self):
    '''
    Home the PackBot (move arm to a "Packed" pose) suitable for storage
    '''
    solution = [-0.0018, -2.9191, 2.381, -2.592, -1.5702]
    self.robot.SetActiveDOFValues(solution)
    traj = self.basemanip.MoveActiveJoints(goal=solution, outputtrajobj=True, 
                            maxiter=4000, steplength=0.01, maxtries=2, execute=False)
    self.controller.Play()
    self.controller.TestTrajectorySending(traj)
 
  def UpdateObstacleData(self):
    '''
    Update the obstacle data from the kinect
    '''
    self.rawKinect.clearData()
    self.rawKinect.setCylinders(self.cylinders)
    self.rawKinect.displayData(self.ShowVoxelGrid, self.ShowPointCloud, 
                            self.numVoxelY, self.numVoxelX, self.numVoxelZ)
    print 'Done obstacle update'

  def UpdateCylinderData(self):
    '''
    UpdateCylinderData clears the environment of cylinders and searches
    kinect data for cylinders again
    '''
    self.detect.deleteCylinders(self.cylinders)
    self.grasp.reset()
    self.graspSelector.saveTraj(None)
    self.detect.findCylinders()
    self.cylinders = self.detect.getCylinders()
    print 'Done cylinder update'

  def PlanTrajectories(self):
    '''
    Plan trajectories to the detected cylinders and color them appropriately.
    Graspable cylinders are green, all others are red.
    '''
    self.highlight.colorKinBodies(self.cylinders)
    print 'Done planning'
  
  def _FoVUpdate(self):
    '''
    Function to update various FoV elements at ~20Hz
    '''
    run = False
    with self.runLock:
      run = self.run
    if run:
      print 'Ready'
    count = 0
    while run:
      with self.UpdateLock:
        if self.ViewPackBotFoV:
          self.cam.enterFirstPerson()
        else:
          self.cam.returnToMainCam()
        if self.DrawPackBotFoV:
          self.cam.drawFoV()
        else:
          self.cam.clearFoV()
        if self.DrawKinectFoV:
          self.kinect.drawFoV()
        else:
          self.kinect.clearFoV()
        if count > 19:
          count = 0
          if self.graspSelector.getTraj():
            self.conn.send(['SetExecuteColor', 'green'])
          else:
            self.conn.send(['SetExecuteColor', 'red'])
        count += 1
      time.sleep(0.05)
      with self.runLock:
        run = self.run

  def start(self):
    '''
    Starts the main loop to check for updates from the gui and 
    initializes the cylinders and obstacle data
    '''
    run = False
    with self.runLock:
      run = self.run
    while run:
      command = self.conn.recv()
      self.parseCommand(command)
      with self.runLock:
        run = self.run

  def test(self):
    '''
    Function to test trajectory planning and target coloration
    '''
    self.cylinders = [self.env.GetKinBody('target'+str(i+1)) for i in range(1)]
    self.highlight.colorKinBodies(self.cylinders)
   
  def quit(self):
    '''
    Method to turn run to off and to cleanup the various threads
    '''
    with self.runLock:
      self.run = False
    self.foVUpdateThread.join()
Ejemplo n.º 37
0
class Gui(QMainWindow):
    """!
    Main GUI Class

    Contains the main function and interfaces between the GUI and functions.
    """
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        """ Groups of ui commonents """
        # Per-joint LCD widgets used by updateJointErrors.
        self.error_lcds = [
            self.ui.rdoutBaseErrors, self.ui.rdoutShoulderErrors,
            self.ui.rdoutElbowErrors, self.ui.rdoutWristErrors,
            self.ui.rdoutWrist2Errors, self.ui.rdoutWrist3Errors
        ]
        # Per-joint angle readouts used by updateJointReadout.
        self.joint_readouts = [
            self.ui.rdoutBaseJC, self.ui.rdoutShoulderJC, self.ui.rdoutElbowJC,
            self.ui.rdoutWristJC, self.ui.rdoutWrist2JC, self.ui.rdoutWrist3JC
        ]
        # Labels mirroring the slider values (see sliderChange).
        self.joint_slider_rdouts = [
            self.ui.rdoutBase, self.ui.rdoutShoulder, self.ui.rdoutElbow,
            self.ui.rdoutWrist, self.ui.rdoutWrist2, self.ui.rdoutWrist3
        ]
        self.joint_sliders = [
            self.ui.sldrBase, self.ui.sldrShoulder, self.ui.sldrElbow,
            self.ui.sldrWrist, self.ui.sldrWrist2, self.ui.sldrWrist3
        ]
        """Objects Using Other Classes"""
        self.kinect = Kinect()
        self.rexarm = Rexarm()
        self.tp = TrajectoryPlanner(self.rexarm)
        self.sm = StateMachine(self.rexarm, self.tp, self.kinect)
        self.sm.is_logging = False
        """
        Attach Functions to Buttons & Sliders
        TODO: NAME AND CONNECT BUTTONS AS NEEDED
        """
        # Video
        self.ui.videoDisplay.setMouseTracking(True)
        self.ui.videoDisplay.mouseMoveEvent = self.trackMouse
        self.ui.videoDisplay.mousePressEvent = self.calibrateMousePress
        # Buttons
        # Handy lambda function that can be used with Partial to only set the new state if the rexarm is initialized
        nxt_if_arm_init = lambda next_state: self.sm.set_next_state(
            next_state if self.rexarm.initialized else None)
        self.ui.btn_estop.clicked.connect(self.estop)
        self.ui.btn_init_arm.clicked.connect(self.initRexarm)
        self.ui.btnUser1.setText("Calibrate")
        self.ui.btnUser1.clicked.connect(partial(nxt_if_arm_init, 'calibrate'))
        ##OUR_CODE
        self.ui.btn_exec.clicked.connect(self.execute)
        self.ui.btnUser2.clicked.connect(self.record)
        self.ui.btnUser3.clicked.connect(self.playback)
        self.ui.btnUser4.clicked.connect(self.execute_tp)
        self.ui.btnUser5.clicked.connect(self.toggle_logging)
        # NOTE(review): btnUser1 is already connected to
        # partial(nxt_if_arm_init, 'calibrate') above, so a click fires both
        # slots -- confirm the double connection is intentional.
        self.ui.btnUser1.clicked.connect(self.calibrate)
        self.ui.btnUser6.clicked.connect(self.blockDetect)
        self.ui.btnUser7.clicked.connect(self.openGripper)
        self.ui.btnUser8.clicked.connect(self.closeGripper)
        self.ui.btnUser9.clicked.connect(self.clickGrab)
        # Sliders
        for sldr in self.joint_sliders:
            sldr.valueChanged.connect(self.sliderChange)
        self.ui.sldrMaxTorque.valueChanged.connect(self.sliderChange)
        self.ui.sldrSpeed.valueChanged.connect(self.sliderChange)
        # Direct Control
        self.ui.chk_directcontrol.stateChanged.connect(self.directControlChk)
        # Status
        self.ui.rdoutStatus.setText("Waiting for input")
        # Auto exposure
        self.ui.chkAutoExposure.stateChanged.connect(self.autoExposureChk)
        """initalize manual control off"""
        self.ui.SliderFrame.setEnabled(False)
        """Setup Threads"""
        # Rexarm runs its own thread

        # Video
        self.videoThread = VideoThread(self.kinect)
        self.videoThread.updateFrame.connect(self.setImage)
        self.videoThread.start()

        # State machine
        self.logicThread = LogicThread(self.sm)
        self.logicThread.start()

        # Display
        self.displayThread = DisplayThread(self.rexarm, self.sm)
        self.displayThread.updateJointReadout.connect(self.updateJointReadout)
        self.displayThread.updateEndEffectorReadout.connect(
            self.updateEndEffectorReadout)
        self.displayThread.updateStatusMessage.connect(
            self.updateStatusMessage)
        self.displayThread.updateJointErrors.connect(self.updateJointErrors)
        self.displayThread.start()

    """ Slots attach callback functions to signals emitted from threads"""

    @pyqtSlot(QImage, QImage)
    def setImage(self, rgb_image, depth_image):
        """!
        @brief      Display the images from the kinect.

        @param      rgb_image    The rgb image
        @param      depth_image  The depth image
        """
        # Only the radio-selected stream is pushed to the display widget.
        if (self.ui.radioVideo.isChecked()):
            self.ui.videoDisplay.setPixmap(QPixmap.fromImage(rgb_image))
        if (self.ui.radioDepth.isChecked()):
            self.ui.videoDisplay.setPixmap(QPixmap.fromImage(depth_image))

    @pyqtSlot(list)
    def updateJointReadout(self, joints):
        """!
        @brief      Show joint angles (received in radians, displayed in degrees).

        @param      joints  List of joint angles in radians
        """
        for rdout, joint in zip(self.joint_readouts, joints):
            rdout.setText(str('%+.2f' % (joint * R2D)))

    @pyqtSlot(list)
    def updateEndEffectorReadout(self, pos):
        """!
        @brief      Show the 6-element end-effector pose readout.

        @param      pos  [x, y, z, theta, g, p] values to display
        """
        self.ui.rdoutX.setText(str("%+.2f" % (pos[0])))
        self.ui.rdoutY.setText(str("%+.2f" % (pos[1])))
        self.ui.rdoutZ.setText(str("%+.2f" % (pos[2])))
        self.ui.rdoutT.setText(str("%+.2f" % (pos[3])))
        self.ui.rdoutG.setText(str("%+.2f" % (pos[4])))
        self.ui.rdoutP.setText(str("%+.2f" % (pos[5])))

    @pyqtSlot(list)
    def updateJointErrors(self, errors):
        """!
        @brief      Display per-joint error values on the LCD widgets.

        @param      errors  List of error values, one per joint
        """
        for lcd, error in zip(self.error_lcds, errors):
            lcd.display(error)

    @pyqtSlot(str)
    def updateStatusMessage(self, msg):
        """!
        @brief      Show a status string in the status readout.

        @param      msg  The message to display
        """
        self.ui.rdoutStatus.setText(msg)

    """ Other callback functions attached to GUI elements"""

    def estop(self):
        # Emergency stop: handled by the state machine.
        self.sm.set_next_state("estop")

    def execute(self):
        self.sm.set_next_state("execute")

    def record(self):
        self.sm.set_next_state("record")

    def playback(self):
        self.sm.set_next_state("playback")

    def execute_tp(self):
        self.sm.set_next_state("execute_tp")

    def calibrate(self):
        self.sm.set_next_state("calibrate")

    def blockDetect(self):
        # Runs directly on the kinect rather than through the state machine.
        self.kinect.blockDetector()

    def openGripper(self):
        self.rexarm.open_gripper()

    def closeGripper(self):
        self.rexarm.close_gripper()

    def clickGrab(self):
        self.sm.set_next_state("clickGrab")

    def toggle_logging(self):
        """!
        @brief      Toggle CSV logging of rexarm data on/off.

        Opens log_data.csv in append mode and hands the writer to the rexarm;
        closing happens on the next toggle.
        """
        if not self.sm.is_logging:
            # with open('log_data.csv', 'a') as log_file:
            self.rexarm.log_file = open('log_data.csv', 'a')
            self.rexarm.csv_writer = csv.writer(self.rexarm.log_file,
                                                delimiter=',')
            self.sm.is_logging = True
        else:
            self.rexarm.log_file.close()
            self.sm.is_logging = False

    def sliderChange(self):
        """!
        @brief Slider changed

        Function to change the slider labels when sliders are moved and to command the arm to the given position
        """
        for rdout, sldr in zip(self.joint_slider_rdouts, self.joint_sliders):
            rdout.setText(str(sldr.value()))

        self.ui.rdoutTorq.setText(str(self.ui.sldrMaxTorque.value()) + "%")
        self.ui.rdoutSpeed.setText(str(self.ui.sldrSpeed.value()) + "%")

        # Do nothing if the rexarm is not initialized
        if self.rexarm.initialized:
            self.rexarm.set_torque_limits(
                [self.ui.sldrMaxTorque.value() / 100.0] *
                self.rexarm.num_joints)
            self.rexarm.set_speeds_normalized_all(self.ui.sldrSpeed.value() /
                                                  100.0)
            joint_positions = np.array(
                [sldr.value() * D2R for sldr in self.joint_sliders])
            # Only send the joints that the rexarm has
            self.rexarm.set_positions(
                joint_positions[0:self.rexarm.num_joints])

    def directControlChk(self, state):
        """!
        @brief      Changes to direct control mode

                    Will only work if the rexarm is initialized.

        @param      state  State of the checkbox
        """
        if state == Qt.Checked and self.rexarm.initialized:
            # Go to manual and enable sliders
            self.sm.set_next_state("manual")
            self.ui.SliderFrame.setEnabled(True)
        else:
            # Lock sliders and go to idle
            self.sm.set_next_state("idle")
            self.ui.SliderFrame.setEnabled(False)
            self.ui.chk_directcontrol.setChecked(False)

    def autoExposureChk(self, state):
        """!
        @brief      Sets the Kinect auto exposer

        @param      state  State of the checkbox
        """
        if state == Qt.Checked and self.kinect.kinectConnected == True:
            self.kinect.toggleExposure(True)
        else:
            self.kinect.toggleExposure(False)

    def trackMouse(self, mouse_event):
        """!
        @brief      Show the mouse position in GUI

                    TODO: after implementing workspace calibration display the world coordinates the mouse points to in the RGB
                    video image.

        @param      mouse_event  QtMouseEvent containing the pose of the mouse at the time of the event not current time
        """
        # if self.kinect.DepthFrameRaw.any() != 0:
        #     self.ui.rdoutMousePixels.setText("(-,-,-)")
        #     self.ui.rdoutMouseWorld.setText("(-,-,-)")
        if self.kinect.cameraCalibrated:
            # Pixel is stored (row, col) i.e. (y, x) order.
            pixel = np.array([mouse_event.y(), mouse_event.x()])
            # cameraCoord = self.kinect.pixel2Camera(pixel)
            worldCoord = self.kinect.getWorldCoord(pixel)
            self.kinect.worldCoords = worldCoord
            # print(worldCoord)
            self.ui.rdoutMousePixels.setText(np.array2string(pixel))
            # self.ui.rdoutMouseWorld.setText(np.array2string((worldCoord * 100).astype(int)))
            self.ui.rdoutMouseWorld.setText(np.array2string((worldCoord)))

    def calibrateMousePress(self, mouse_event):
        """!
        @brief Record mouse click positions for calibration

        @param      mouse_event  QtMouseEvent containing the pose of the mouse at the time of the event not current time
        """
        """ Get mouse posiiton """
        # The kinect consumes last_click when new_click is set.
        pt = mouse_event.pos()
        self.kinect.last_click[0] = pt.x()
        self.kinect.last_click[1] = pt.y()
        self.kinect.new_click = True
        # print(self.kinect.last_click)

    def initRexarm(self):
        """!
        @brief      Initializes the rexarm.
        """
        self.sm.set_next_state('initialize_rexarm')
        self.ui.SliderFrame.setEnabled(False)
        self.ui.chk_directcontrol.setChecked(False)
Ejemplo n.º 38
0
  def __init__(self, simulate, conn, envFile='environment.xml'):
    '''
    Build the OpenRAVE environment, the robot controllers, the grasp
    interfaces and the background FoV update thread.

    @param simulate: when True everything runs against the ideal
                     (simulated) controller; otherwise a network link to
                     the real PackBot is established.
    @param conn:     pipe/connection to the Qt GUI process.
    @param envFile:  OpenRAVE environment XML containing robot and floor.
    '''
    self.conn = conn #Conn is the link to our QT GUI which tells it when a traj is ready
    self.simulate = simulate #boolean which dictates whether PackBot will execute a traj

    self.env = Environment()
    self.env.SetViewer('qtcoin')

    self.env.Load(envFile) #contains the robot and the floor
    self.robot = self.env.GetRobots()[0]
    # First 5 DOFs are the active (arm) joints.
    self.robot.SetActiveDOFs(range(5))

    self.taskmanip = TaskManipulation(self.robot) #used for controlling the gripper
    self.basemanip = BaseManipulation(self.robot) #used for controlling EOD arm

    self.simController = RaveCreateController(self.env,'IdealController')

    if self.simulate:
      self.robotWrapper = RobotWrapper(self.robot, self.simController, self.simController, True)
      self.robotWrapper.swapToSimController()
      self.grasp = Graspability(self.env, self.robotWrapper, lambda:self.robot.GetDOFValues()[:5])

      self.graspSelector = GraspInterface(self.env, self.simController, self.robotWrapper, self.grasp.getTraj, self.simulate)
    else:
      # real robot so we have a video stream to start
      self.videoThread = Thread(target=showVideo)
      self.videoThread.daemon = False
      self.videoThread.start()

      #Establishing a network connection to PackBot...
      self.controller = PackBotController(self.env)
      self.controller.InitializeCommunications()

      print 'giving time to make establish full connection to packbot'

      time.sleep(0.5)

      #initializes the arm callback
      self.robotWrapper = RobotWrapper(self.robot, self.simController, self.controller)
      self.robotWrapper.swapToRealController()

      time.sleep(.5)

      #graspability optimizes grasps to approach cans straight-on
      self.grasp = Graspability(self.env, self.robotWrapper, self.robot.GetDOFValues)
      #click-to-grasp callback thread
      self.graspSelector = GraspInterface(self.env, self.controller, self.robotWrapper, self.grasp.getTraj, self.simulate)


    #Custom UI elements which color graspable objects green and enable camera+kinect field of view shading
    self.highlight = HighlightGraspable(self.env, self.grasp.getTraj)
    self.cam = HeadCamera(self.env, self.robot)
    self.kinect = Kinect(self.env, self.robot)

    #Using the kinect+PCL to detect cylindrical objects and add them to the env
    self.detect = DetectCylinder(self.env, self.robot) 
    self.cylinders = self.detect.getCylinders()
    self.rawKinect = RawKinect(self.env, self.robot)

    #Setup the run variables
    # run/runLock are shared with start(), _FoVUpdate() and quit().
    self.runLock = Lock()
    self.run = True

    # create a lock and a 20Hz thread to handle various FoV elements
    self.DrawPackBotFoV = False
    self.ViewPackBotFoV = False
    self.DrawKinectFoV = False
    self.UpdateLock = Lock()
    self.foVUpdateThread  = Thread(target=self._FoVUpdate)
    self.foVUpdateThread.daemon = False
    self.foVUpdateThread.start()

    # We want to show the voxel grid by default
    self.ShowVoxelGrid = True

    # We don't want to see the point cloud by default
    self.ShowPointCloud = False

    # Initialize the number of voxels we want to draw
    self.numVoxelY = 100
    self.numVoxelX = 200
    self.numVoxelZ = 300

    if self.graspSelector.handle is None:
      print 'Callback was not registered -- grasp selection will not work'
Ejemplo n.º 39
0
class SmartBotEnv(gym.Env):
    """Gym environment for a simulated top-down grasping task.

    The scene consists of an ObjectToPickUp box, a simulated Kinect depth
    camera looking down from (0, 0, 1) and a two-finger gripper positioned
    in polar coordinates (r, phi) on the floor.  Observations are depth
    images; actions are (r, phi) gripper placements.
    """

    rewardSuccess = 500
    rewardFailure = 0
    rewardUnreachablePosition = -5  # TODO: do we need this anymore?
    # when set to True, the reward will be rewardSuccess if gripper could grasp the object, rewardFailure otherwise
    # when set to False, the reward will be calculated from the distance between the gripper & the position of success
    binaryReward = False

    # some algorithms (like the ddpg from /home/joel/Documents/gym-gazebo/examples/pincher_arm/smartbot_pincher_kinect_ddpg.py)
    # currently assume that the observation_space has shape (x,) instead of (220,300), so for those algorithms set this to True
    flattenImage = False

    state = np.array([])
    imageWidth = 320
    imageHeight = 160
    boxLength = 0.02
    boxWidth = 0.02
    boxHeight = 0.03
    gripperDistance = 0.032  # between the fingers
    gripperHeight = 0.035  # from base to finger tip
    gripperWidth = 0.03
    gripperRadiusMax = 0.2  # maximal distance between robot base and gripper on the floor
    gripperRadiusMin = 0.04  # minimal distance between robot base and gripper on the floor

    firstRender = True

    def __init__(self):
        """ Initializes the environment. """

        # the state space (=observation space) are all possible depth images of the kinect camera
        if (self.flattenImage):
            self.observation_space = spaces.Box(
                low=0,
                high=255,
                shape=[self.imageHeight * self.imageWidth],
                dtype=np.uint8)
        else:
            self.observation_space = spaces.Box(
                low=0,
                high=255,
                shape=[self.imageHeight, self.imageWidth],
                dtype=np.uint8)

        # Actions are (r, phi): radial distance and angle of the gripper.
        boundaries_r = [self.gripperRadiusMin, self.gripperRadiusMax]
        boundaries_phi = [0, np.pi]

        low = np.array([boundaries_r[0], boundaries_phi[0]])
        high = np.array([boundaries_r[1], boundaries_phi[1]])
        self.action_space = spaces.Box(low=low, high=high, dtype=np.float32)

        self.reward_range = (-np.inf, np.inf)

        self.seed()

        # create object for box (object to pick up)
        self.box = ObjectToPickUp(length=self.boxLength,
                                  width=self.boxWidth,
                                  height=self.boxHeight,
                                  gripperRadius=self.gripperRadiusMax)
        # create object for kinect
        self.kinect = Kinect(self.imageWidth,
                             self.imageHeight,
                             x=0.0,
                             y=0.0,
                             z=1.0)
        # create object for gripper
        self.gripper = Gripper(self.gripperDistance,
                               self.gripperWidth,
                               self.gripperHeight,
                               r=0.0,
                               phi=0.0)

    # __init__

    def seed(self, seed=None):
        """ Seeds the environment (for replicating the pseudo-random processes). """
        self.np_random, seed = seeding.np_random(seed)
        return [seed]

    # seed

    def reset(self):
        """ Resets the state of the environment and returns an initial observation."""
        # place box
        self.box.place(randomPlacement=True)

        # for testing purposes
        # self.box.place(randomPlacement=False, x=0.0, y=0.1, phi=np.pi/2)

        # get depth image
        image = self.kinect.getImage(self.box,
                                     filter=False,
                                     flattenImage=self.flattenImage,
                                     saveImage=True)

        self.state = image
        return self.state

    # reset

    def close(self):
        """ Closes the environment and shuts down the simulation. """
        logging.info("closing SmartBotEnv")
        # Bug fix: the previous call was super(gym.Env, self).close(), which
        # skips gym.Env in the MRO and looks up close() on its parent
        # (object) -- raising AttributeError at runtime.  Delegate to the
        # parent class implementation instead.
        super(SmartBotEnv, self).close()

    # close

    stepcount = 0
    winkel = np.linspace(0, np.pi)

    def step(self, action):
        """ Executes the action (i.e. moves the arm to the pose) and returns the reward and a new state (depth image). """

        # determine if gripper could grasp the ObjectToPickUp
        gripperR = action[0].astype(np.float64)
        gripperPhi = action[1].astype(np.float64)

        # # for testing purposes
        # gripperR = 0.1
        # gripperPhi = self.winkel[self.stepcount]
        # self.stepcount += 1

        self.gripper.place(gripperR, gripperPhi)

        logging.debug("moving arm to position: [{0} {1}]".format(
            gripperR, gripperPhi))
        logging.debug("box position: [{0} {1} {2}]".format(
            self.box.pos[0], self.box.pos[1], self.box.phi))

        reward, graspSuccess = self.calculateReward()
        logging.debug("received reward: " + str(reward))

        # re-place object to pick up if grasp was successful
        # if(graspSuccess):
        self.box.place(randomPlacement=True)

        # get depth image
        image = self.kinect.getImage(self.box,
                                     filter=False,
                                     flattenImage=self.flattenImage,
                                     saveImage=True)

        self.state = image
        # Every episode is a single grasp attempt, so done is always True.
        done = True
        info = {}

        return self.state, reward, done, info

    # step

    def calculateReward(self):
        """ Calculates the reward for the current timestep, according to the gripper position and the pickup position. 
            A high reward is given if the gripper could grasp the box (pickup) if it would close the gripper. """

        # check if center of gravity of box is between the gripper fingers (i.e. inside the ag-bg-cg-dg polygon)
        # see e.g.: https://stackoverflow.com/a/23453678
        bbPath_gripper = mplPath.Path(
            np.array([
                self.gripper.a, self.gripper.b, self.gripper.c, self.gripper.d
            ]))
        # one finger is between a & b, the other finger is between c & d
        cogBetweenFingers = bbPath_gripper.contains_point(
            (self.box.pos[0], self.box.pos[1]))
        logging.debug("center of gravity is between the fingers: {}".format(
            cogBetweenFingers))

        # check if both gripper fingers don't intersect with the box
        bbPath_box = mplPath.Path(
            np.array([self.box.a, self.box.b, self.box.c, self.box.d]))
        bbPath_gripper_left = mplPath.Path(
            np.array([self.gripper.a, self.gripper.b]))
        bbPath_gripper_right = mplPath.Path(
            np.array([self.gripper.c, self.gripper.d]))
        leftGripperCrashes = bbPath_box.intersects_path(bbPath_gripper_left,
                                                        filled=True)
        rightGripperCrashes = bbPath_box.intersects_path(bbPath_gripper_right,
                                                         filled=True)
        logging.debug("left gripper crashes: {}".format(leftGripperCrashes))
        logging.debug("right gripper crashes: {}".format(rightGripperCrashes))

        # if the center of gravity of the box is between the gripper fingers and none of the fingers collide with the box, we are able to grasp the box
        if (cogBetweenFingers and not leftGripperCrashes
                and not rightGripperCrashes):
            logging.info(
                "********************************************* grasping would be successful! *********************************************"
            )
            graspSuccess = True
        else:
            graspSuccess = False

        if (self.binaryReward):
            if (graspSuccess):
                return self.rewardSuccess, graspSuccess
            else:
                return self.rewardFailure, graspSuccess
        else:
            # calculate reward according to the distance from the gripper to the center of gravity of the box
            distance = np.linalg.norm(self.gripper.pos -
                                      self.box.pos)  # e.g. 0.025
            # invert the distance, because smaller distance == closer to the goal == more reward
            reward = 1.0 / (2 * distance)
            # scale the reward if grasping would be successful
            if (graspSuccess):
                reward = 50 * reward
            # elif(leftGripperCrashes or rightGripperCrashes): # "punishement" for crashing into the box
            #     reward = reward / 5
            # Cap the reward so tiny distances don't explode it.
            reward = min(reward, 1000)
            return reward, graspSuccess
        # if

    # calculateReward

    def render(self, mode='human'):
        """ Draws the gripper fingers and the box with matplotlib. """
        if (self.firstRender):
            # display stuff
            plt.ion()
            self.fig, self.ax = plt.subplots()
            self.ax.axis("equal")
            self.ax.set_xlim([
                -self.box.gripperRadius - self.box.length - 0.2,
                self.box.gripperRadius + self.box.length + 0.2
            ])
            self.ax.set_ylim([
                0 - self.box.length - 0.2,
                self.box.gripperRadius + self.box.length + 0.2
            ])
            self.gripperLeftPoly = patches.Polygon(
                [self.gripper.a, self.gripper.b], closed=True, color="black")
            self.gripperRightPoly = patches.Polygon(
                [self.gripper.c, self.gripper.d], closed=True, color="black")
            self.pickMeUpPoly = patches.Polygon(
                [self.box.a, self.box.b, self.box.c, self.box.d],
                closed=True,
                color="red")
            self.ax.add_artist(self.gripperLeftPoly)
            self.ax.add_artist(self.gripperRightPoly)
            self.ax.add_artist(self.pickMeUpPoly)
            self.firstRender = False
        # if
        plt.ion()
        plt.cla()
        self.gripperLeftPoly.set_xy([self.gripper.a, self.gripper.b])
        self.gripperRightPoly.set_xy([self.gripper.c, self.gripper.d])
        self.pickMeUpPoly.set_xy(
            [self.box.a, self.box.b, self.box.c, self.box.d])
        self.ax.add_artist(self.gripperLeftPoly)
        self.ax.add_artist(self.gripperRightPoly)
        self.ax.add_artist(self.pickMeUpPoly)
        # self.fig.canvas.draw()
        plt.pause(0.0001)
        plt.draw()
Ejemplo n.º 40
0
class KinectTestWindow(gtk.Window):
    """GTK window that shows live (or recorded) Kinect data with a game-scene
    preview, plus Open/Save/Pause controls."""

    REFRESH_DELAY = 500  # ms

    def __init__(self):
        self._paused = True
        self._kinect = Kinect()

        gtk.Window.__init__(self)
        self.set_default_size(1280, 960)

        vbox = gtk.VBox()
        self.add(vbox)

        # Kinect info visualisation.
        self._display = KinectDisplay(self._kinect)
        vbox.pack_start(self._display, True, True, 0)

        hbox = gtk.HBox()
        vbox.pack_start(hbox)

        # Game scheme representation.
        game_scene = GameSceneArea(self._kinect)
        self._display.add_observer(game_scene)
        hbox.pack_start(game_scene)

        button_vbox = gtk.VBox()
        hbox.pack_start(button_vbox)

        # Choose static data.
        self.choose = gtk.Button('Open', gtk.STOCK_OPEN)
        button_vbox.pack_start(self.choose)
        self.choose.connect("clicked", self._choose_cb)

        # Save button.  Disabled until paused (see _pause_cb).
        self.save = gtk.Button('Save', gtk.STOCK_SAVE)
        self.save.set_sensitive(False)
        button_vbox.pack_start(self.save)
        self.save.connect("clicked", self._save_cb)

        # Pause/Autorefresh button.
        self.pause = gtk.Button('Pause', gtk.STOCK_MEDIA_PAUSE)
        button_vbox.pack_start(self.pause)
        self.pause.connect("clicked", self._pause_cb)

        self.connect("destroy", gtk.main_quit)
        self.show_all()

        # Auto-refresh every REFRESH_DELAY ms (500 ms -> about 2 Hz).
        self.timer_id = gobject.timeout_add(self.REFRESH_DELAY,
                self._timedout)

    def _choose_cb(self, widget, data=None):
        """Let the user pick a recorded *_depth.npy file to replay."""
        # Create file chooser.
        dialog = gtk.FileChooserDialog("Open..",
                None,
                gtk.FILE_CHOOSER_ACTION_OPEN,
                (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL,
                    gtk.STOCK_OPEN, gtk.RESPONSE_OK))
        dialog.set_default_response(gtk.RESPONSE_OK)

        # Get only numpy arrays.
        filter = gtk.FileFilter()
        filter.set_name("Numpy arrays")
        filter.add_pattern("*_depth.npy")
        dialog.add_filter(filter)

        response = dialog.run()
        chosen = response == gtk.RESPONSE_OK
        if chosen:
            # Extract file basename.
            # Strips the 10-character '_depth.npy' suffix.
            filename = dialog.get_filename()[:-10]
            basename = os.path.basename(filename)
            self._kinect.set_filename(basename)
            print basename, 'selected'

        dialog.destroy()

        # Refresh GUI if needed.
        if chosen:
            self._display.refresh_data()
            self.queue_draw()

    def _save_cb(self, widget, data=None):
        """Dump the latest rgb/depth frames as timestamped .npy files."""
        rgb = self._kinect.latest_rgb
        depth = self._kinect.latest_depth
        fname_base = time.strftime('%Y-%m-%d_%H-%M-%S', time.localtime())
        numpy.save(fname_base + '_rgb', rgb)
        numpy.save(fname_base + '_depth', depth)
        print 'Saved with "%s" base filename' % fname_base

    def _pause_cb(self, widget, data=None):
        """Toggle pause: Save/Open are only usable while paused."""
        self._paused = not self._paused
        self.save.set_sensitive(self._paused)
        self.choose.set_sensitive(self._paused)

        if not self._paused:
            self.pause.set_label(gtk.STOCK_MEDIA_PAUSE)
            # Try to prevent unwanted redraw.
            if not data:
                self._display.refresh_data()
                self.queue_draw()
            self.timer_id = gobject.timeout_add(self.REFRESH_DELAY,
                    self._timedout)
        else:
            self.pause.set_label(gtk.STOCK_REFRESH)

    def _timedout(self):
        """Periodic refresh callback for the gobject timer."""
        # Stop auto refresh if no Kinect is detected.
        if self._kinect.latest_present:
            self._display.refresh_data()
            self.queue_draw()
        else:
            if not self._paused:
                print 'No Kinect found, stopping auto-refresh'
                self._pause_cb(None, True)

        # Timer is repeated until False is returned.
        return not self._paused

    def run(self):
        """Enter the GTK main loop (blocks until the window is destroyed)."""
        gtk.main()
Ejemplo n.º 41
0
from robot import Robot
from kinect2path import plan, World, coordTransform, getLocalGoal, local2global

# from matplotlib import pyplot as plt

# Vicon motion-capture server connection parameters.
TCP_IP = '192.168.10.7'
TCP_PORT = 50000
BUFFER_SIZE = 1024

PLAN_TIME = 100.0

x_g = [-1.5, 0] # Goal in global coordinates
th_g = 3.0* math.pi/4.0 # Goal in global coordinates (+CCW)

# Bring up the sensing and actuation stack.
vicon = Vicon(TCP_IP, TCP_PORT, BUFFER_SIZE)
kinect = Kinect()
robot = Robot(obstacle_avoid=True,min_motor_speed=20,max_motor_speed=180)

# Serial link to the motor controller.
robot.open('/dev/ttyTHS2', 19200)
robot.write_motors() # Writes initial value (0) to motors

traj_count = 1

t_init = time.time()
t_plan = t_init

# Local planning grid (cells); start is the middle of the near edge.
world_size = [51,25]
local_start = [int(world_size[0]/2.0), 0]

for i in range(5):
	raw_depth = kinect.get_raw_depth()
Ejemplo n.º 42
0
    def __init__(self, parent=None):
        """Wire up the Dynamixel bus, rexarm, kinect and state machine,
        connect all GUI callbacks, then start the video/logic/display
        threads and the mouse-tracking timer."""
        QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        """ Set GUI to track mouse """
        QWidget.setMouseTracking(self, True)
        """
		Dynamixel bus
		TODO: add other motors here as needed with their correct address"""
        self.dxlbus = DXL_BUS(DEVICENAME, BAUDRATE)

        # One servo object per joint, addressed by bus ID.
        port_num = self.dxlbus.port()
        base = DXL_MX(port_num, 1)
        shld = DXL_MX(port_num, 2)
        elbw = DXL_MX(port_num, 3)
        wrst = DXL_AX(port_num, 4)
        wrst2 = DXL_AX(port_num, 5)
        #wrst3 = DXL_XL(port_num, 6)
        #grip = DXL_XL(port_num, 7)
        #wrst2.set_compliance(8,64)
        """Objects Using Other Classes"""
        self.kinect = Kinect()
        self.rexarm = Rexarm((base, shld, elbw, wrst, wrst2), 0)
        self.tp = TrajectoryPlanner(self.rexarm)
        self.sm = StateMachine(self.rexarm, self.tp, self.kinect)
        """
		Attach Functions to Buttons & Sliders
		TODO: NAME AND CONNECT BUTTONS AS NEEDED
		"""
        self.ui.btn_estop.clicked.connect(self.estop)
        self.ui.btn_exec.clicked.connect(self.execute)
        self.ui.btn_task1.clicked.connect(self.record)
        self.ui.btn_task2.clicked.connect(self.clear_waypoints)
        self.ui.btn_task3.clicked.connect(self.toggle_gripper)
        self.ui.btnUser1.setText("Calibrate")
        self.ui.btnUser1.clicked.connect(
            partial(self.sm.set_next_state, "calibrate"))
        self.ui.btnUser2.setText("Block Detector")
        self.ui.btnUser2.clicked.connect(
            partial(self.sm.set_next_state, "block detection"))
        # NOTE(review): btnUser2 is configured twice -- the second setText
        # overrides the label and the second connect adds another slot, so a
        # single click queues "block detection" and then "color buckets".
        # Confirm which binding is intended.
        self.ui.btnUser2.setText("Color Buckets")
        self.ui.btnUser2.clicked.connect(
            partial(self.sm.set_next_state, "color buckets"))
        self.ui.btnUser3.setText("Click Grab/Drop Mode")
        self.ui.btnUser3.clicked.connect(
            partial(self.sm.set_next_state, "click grab drop"))
        self.ui.btnUser4.setText("Pick n' Stack")
        self.ui.btnUser4.clicked.connect(
            partial(self.sm.set_next_state, "pick and stack"))
        self.ui.btnUser5.setText("Line 'em up")
        self.ui.btnUser5.clicked.connect(
            partial(self.sm.set_next_state, "line them up"))
        self.ui.btnUser6.setText("Stack 'em high")
        self.ui.btnUser6.clicked.connect(
            partial(self.sm.set_next_state, "stack them high"))
        self.ui.btnUser7.setText("Block slider")
        self.ui.btnUser7.clicked.connect(
            partial(self.sm.set_next_state, "block slider"))
        self.ui.btnUser8.setText("Hot swap")
        self.ui.btnUser8.clicked.connect(
            partial(self.sm.set_next_state, "hot swap"))
        self.ui.btnUser9.setText("Calibration Accuracy")
        self.ui.btnUser9.clicked.connect(
            partial(self.sm.set_next_state, "Calibration Accuracy"))

        # All joint sliders share one handler.
        self.ui.sldrBase.valueChanged.connect(self.sliderChange)
        self.ui.sldrShoulder.valueChanged.connect(self.sliderChange)
        self.ui.sldrElbow.valueChanged.connect(self.sliderChange)
        self.ui.sldrWrist.valueChanged.connect(self.sliderChange)

        self.ui.sldrWrist2.valueChanged.connect(self.sliderChange)
        self.ui.sldrWrist3.valueChanged.connect(self.sliderChange)
        self.ui.sldrGrip1.valueChanged.connect(self.sliderChange)

        self.ui.sldrMaxTorque.valueChanged.connect(self.sliderChange)
        self.ui.sldrSpeed.valueChanged.connect(self.sliderChange)
        self.ui.chk_directcontrol.stateChanged.connect(self.directControlChk)
        self.ui.rdoutStatus.setText("Waiting for input")
        """initalize manual control off"""
        self.ui.SliderFrame.setEnabled(False)
        """initalize rexarm"""
        self.rexarm.initialize()
        """Setup Threads"""
        self.videoThread = VideoThread(self.kinect)
        self.videoThread.updateFrame.connect(self.setImage)
        self.videoThread.start()

        self.logicThread = LogicThread(self.sm)
        self.logicThread.start()

        self.displayThread = DisplayThread(self.rexarm, self.sm)
        self.displayThread.updateJointReadout.connect(self.updateJointReadout)
        self.displayThread.updateEndEffectorReadout.connect(
            self.updateEndEffectorReadout)
        self.displayThread.updateStatusMessage.connect(
            self.updateStatusMessage)
        self.displayThread.start()
        """
		Setup Timer
		this runs the trackMouse function every 50ms
		"""
        self._timer = QTimer(self)
        self._timer.timeout.connect(self.trackMouse)
        self._timer.start(50)
Ejemplo n.º 43
0
#     [0, 1, 0, 0],
#     [0, 0, 1, 0],
# ])

# Build the pixel sample coordinates for a 1920x1080 frame: either a coarse
# 30x60 grid spread over the whole image, or a single horizontal scanline
# through the vertical centre.
if GRID_VS_LINE:
    gridshape = (30, 60)
    rows, cols = np.indices(gridshape)
    # Scale grid indices up to full-resolution pixel coordinates.
    rows = (1080 * rows / gridshape[0]).astype(np.int32)
    cols = (1920 * cols / gridshape[1]).astype(np.int32)
    y_coords = rows.flatten()
    x_coords = cols.flatten()
else:
    # One sample per column along the middle row (y = 540).
    y_coords = np.ones([1920]) * 1080 / 2
    x_coords = np.arange(1920)

k = Kinect()
k.start()

for i in range(500):
    start = time.time()
    rgb, d = k.get_current_rgbd_frame(copy=False)
    rgb[..., 2] = np.logical_or(d > 5000, d < 150) * 255
    print("kinect time: %f\t" % (time.time() - start), end="")
    cv2.imshow('color', rgb)
    cv2.waitKey(1)

    start = time.time()

    if GRID_VS_LINE:
        d_coord = d[rows, cols].flatten()
    else:
Ejemplo n.º 44
0
class Gui(QMainWindow):
    """Main GUI window for the robot-arm station.

    Owns the hardware objects (Dynamixel bus, Rexarm, Kinect), the
    trajectory planner and state machine, the worker threads that feed
    the UI, and routes button/slider/mouse events to the state machine.
    """
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Track the mouse even with no button pressed (needed by trackMouse).
        QWidget.setMouseTracking(self, True)
        # Dynamixel bus -- add other motors here with their bus IDs.
        self.dxlbus = DXL_BUS(DEVICENAME, BAUDRATE)
        port_num = self.dxlbus.port()
        base = DXL_MX(port_num, 0)
        shld = DXL_MX(port_num, 1)
        elbw = DXL_MX(port_num, 2)
        wrst = DXL_AX(port_num, 3)
        wrst2 = DXL_XL(port_num, 4)
        grip = DXL_XL(port_num, 5)
        # Core objects: arm, camera, planner, state machine.
        self.rexarm = Rexarm((base, shld, elbw, wrst, wrst2), grip)
        self.kinect = Kinect(self.rexarm)
        self.tp = TrajectoryPlanner(self.rexarm)
        self.sm = StateMachine(self.rexarm, self.tp, self.kinect)
        # --- Attach functions to buttons & sliders ---------------------
        self.ui.btn_estop.clicked.connect(self.estop)
        self.ui.btnUser1.setText("Calibrate")
        self.ui.btnUser1.clicked.connect(
            partial(self.sm.set_next_state, "calibrate"))
        self.ui.sldrBase.valueChanged.connect(self.sliderChange)
        self.ui.sldrShoulder.valueChanged.connect(self.sliderChange)
        self.ui.sldrElbow.valueChanged.connect(self.sliderChange)
        self.ui.sldrWrist.valueChanged.connect(self.sliderChange)

        # Gripper sliders share the same handler as the joint sliders.
        self.ui.sldrGrip1.valueChanged.connect(self.sliderChange)
        self.ui.sldrGrip2.valueChanged.connect(self.sliderChange)

        # Gripper open (btnUser5) and close (btnUser6) buttons.
        self.ui.btnUser5.clicked.connect(self.btn2clicked)
        self.ui.btnUser6.clicked.connect(self.btn1clicked)

        self.ui.sldrMaxTorque.valueChanged.connect(self.sliderChange)
        self.ui.sldrSpeed.valueChanged.connect(self.sliderChange)
        self.ui.chk_directcontrol.stateChanged.connect(self.directControlChk)
        self.ui.rdoutStatus.setText("Waiting for input")
        self.ui.btn_exec.clicked.connect(
            partial(self.sm.set_next_state, "execute"))

        # Buttons that simply request a state-machine transition.
        self.ui.btnUser2.setText("Record wp")
        self.ui.btnUser3.setText("Clear wp")
        self.ui.btnUser4.setText("Click and pick")
        self.ui.btnUser2.clicked.connect(
            partial(self.sm.set_next_state, "add_wp"))
        self.ui.btnUser3.clicked.connect(
            partial(self.sm.set_next_state, "clear_wp"))
        self.ui.btnUser4.clicked.connect(
            partial(self.sm.set_next_state, "click_and_pick"))
        self.ui.btnUser7.setText("Save Calibration Points")
        self.ui.btnUser8.setText("Load Previous Calibration")
        self.ui.btnUser7.clicked.connect(
            partial(self.sm.set_next_state, "save_calibration_points"))
        self.ui.btnUser8.clicked.connect(
            partial(self.sm.set_next_state, "load_previous_calibration"))
        self.ui.btnUser9.setText("Record Block Position")
        self.ui.btnUser9.clicked.connect(
            partial(self.sm.set_next_state, "record_block_position"))
        self.ui.btnUser5.setText("Open Gripper")
        self.ui.btnUser6.setText("Close Gripper")

        # Competition-task buttons.
        self.ui.btn_task1.clicked.connect(
            partial(self.sm.set_next_state, "mirror"))

        self.ui.btn_task2.clicked.connect(
            partial(self.sm.set_next_state, "stack_3"))
        self.ui.btn_task3.clicked.connect(
            partial(self.sm.set_next_state, "line_em_up"))
        self.ui.btn_task4.clicked.connect(
            partial(self.sm.set_next_state, "stack_em_high"))

        self.ui.btn_exec_6.clicked.connect(
            partial(self.sm.set_next_state, "pyramid5"))
        # Manual (slider) control starts disabled.
        self.ui.SliderFrame.setEnabled(False)
        # Bring the arm online.
        self.rexarm.initialize()
        # Worker threads: video feed, state-machine logic, readout display.
        self.videoThread = VideoThread(self.kinect)
        self.videoThread.updateFrame.connect(self.setImage)
        self.videoThread.start()

        self.logicThread = LogicThread(self.sm)
        self.logicThread.start()

        self.displayThread = DisplayThread(self.rexarm, self.sm)
        self.displayThread.updateJointReadout.connect(self.updateJointReadout)
        self.displayThread.updateEndEffectorReadout.connect(
            self.updateEndEffectorReadout)
        self.displayThread.updateStatusMessage.connect(
            self.updateStatusMessage)
        self.displayThread.start()
        # Timer: run trackMouse every 50 ms.
        self._timer = QTimer(self)
        self._timer.timeout.connect(self.trackMouse)
        self._timer.start(50)

    # Slots attach callback functions to signals emitted from threads.

    @pyqtSlot(QImage, QImage, QImage)
    def setImage(self, rgb_image, depth_image, detect_image):
        """Show whichever camera view the radio buttons currently select."""
        if (self.ui.radioVideo.isChecked()):
            self.ui.videoDisplay.setPixmap(QPixmap.fromImage(rgb_image))
        if (self.ui.radioDepth.isChecked()):
            self.ui.videoDisplay.setPixmap(QPixmap.fromImage(depth_image))
        if (self.ui.radioDetect.isChecked()):
            self.ui.videoDisplay.setPixmap(QPixmap.fromImage(detect_image))

    @pyqtSlot(list)
    def updateJointReadout(self, joints):
        """Display joint angles (radians in, formatted degrees out)."""
        self.ui.rdoutBaseJC.setText(str("%+.2f" % (joints[0] * R2D)))
        # Shoulder readout is offset by +90 deg -- presumably to match the
        # arm's physical zero convention; confirm against the kinematics.
        self.ui.rdoutShoulderJC.setText(
            str("%+.2f" % ((joints[1] * R2D) + 90.0)))
        self.ui.rdoutElbowJC.setText(str("%+.2f" % (joints[2] * R2D)))
        self.ui.rdoutWristJC.setText(str("%+.2f" % (joints[3] * R2D)))
###
#self.ui.rdoutGrip1.setText(str("%+.2f" % (joints[4]*R2D)))
#self.ui.rdoutGrip2.setText(str("%+.2f" % (joints[5]*R2D)))
###

    @pyqtSlot(list)
    def updateEndEffectorReadout(self, pos):
        """Display the end-effector pose (x, y, z, theta)."""
        self.ui.rdoutX.setText(str("%+.2f" % (pos[0])))
        self.ui.rdoutY.setText(str("%+.2f" % (pos[1])))
        self.ui.rdoutZ.setText(str("%+.2f" % (pos[2])))
        self.ui.rdoutT.setText(str("%+.2f" % (pos[3])))

    @pyqtSlot(str)
    def updateStatusMessage(self, msg):
        """Show a status string from the display thread."""
        self.ui.rdoutStatus.setText(msg)

    # Other callback functions attached to GUI elements.

    def estop(self):
        """Emergency stop: flag the arm and switch the state machine."""
        self.rexarm.estop = True
        self.sm.set_next_state("estop")

    def sliderChange(self):
        """Update slider labels and command the arm to the slider values.

        Torque/speed limits and joint positions are queued with
        update_now=False; the arm applies them on its next update cycle.
        """
        self.ui.rdoutBase.setText(str(self.ui.sldrBase.value()))
        self.ui.rdoutShoulder.setText(str(self.ui.sldrShoulder.value()))
        self.ui.rdoutElbow.setText(str(self.ui.sldrElbow.value()))
        self.ui.rdoutWrist.setText(str(self.ui.sldrWrist.value()))
        self.ui.rdoutGrip1.setText(str(self.ui.sldrGrip1.value()))
        self.ui.rdoutGrip2.setText(str(self.ui.sldrGrip2.value()))
        self.ui.rdoutTorq.setText(str(self.ui.sldrMaxTorque.value()) + "%")
        self.ui.rdoutSpeed.setText(str(self.ui.sldrSpeed.value()) + "%")
        # Sliders are percentages; scale to [0, 1] for the arm API.
        self.rexarm.set_torque_limits([self.ui.sldrMaxTorque.value() / 100.0] *
                                      self.rexarm.num_joints,
                                      update_now=False)
        self.rexarm.set_speeds_normalized_global(self.ui.sldrSpeed.value() /
                                                 100.0,
                                                 update_now=False)
        # Slider values are degrees; the arm expects radians. Grip1 drives
        # the fifth joint (wrist rotate).
        joint_positions = np.array([
            self.ui.sldrBase.value() * D2R,
            self.ui.sldrShoulder.value() * D2R,
            self.ui.sldrElbow.value() * D2R,
            self.ui.sldrWrist.value() * D2R, (self.ui.sldrGrip1.value()) * D2R
        ])

        self.rexarm.set_positions(joint_positions, update_now=False)

    def btn1clicked(self):
        """Close the gripper (wired to btnUser6)."""
        self.rexarm.close_gripper()

    def btn2clicked(self):
        """Open the gripper (wired to btnUser5)."""
        self.rexarm.open_gripper()

    def btn3clicked(self):
        """Rotate the gripper to its home angle (degrees)."""
        initdeg = -39.5
        self.rexarm.set_gripper_rotate(initdeg)

    def directControlChk(self, state):
        """Toggle manual slider control of the arm on/off."""
        if state == Qt.Checked:
            self.sm.set_next_state("manual")
            self.ui.SliderFrame.setEnabled(True)
        else:
            self.sm.set_next_state("idle")
            self.ui.SliderFrame.setEnabled(False)

    def trackMouse(self):
        """Show the pixel and world coordinates under the mouse cursor.

        World coordinates are only shown once the Kinect has been
        calibrated; otherwise "(-,-,-)" placeholders are displayed.
        """

        x = QWidget.mapFromGlobal(self, QCursor.pos()).x()
        y = QWidget.mapFromGlobal(self, QCursor.pos()).y()
        if ((x < MIN_X) or (x >= MAX_X) or (y < MIN_Y) or (y >= MAX_Y)):
            # Cursor is outside the camera image area.
            self.ui.rdoutMousePixels.setText("(-,-,-)")
            self.ui.rdoutMouseWorld.setText("(-,-,-)")
        else:
            # Convert window coordinates to image coordinates.
            x = x - MIN_X
            y = y - MIN_Y
            #color = self.kinect.colorDetector(x,y)
            #self.kinect.colorCalibration(x,y)
            #self.kinect.block_detection_verification(x,y)
            # Map pixel -> world. NOTE(review): real_coord is indexed [x][y]
            # while the depth frame is indexed [y][x]; presumably real_coord
            # is stored column-major -- confirm against Kinect.real_coord.
            real_x = self.kinect.real_coord[x][y][0]
            real_y = self.kinect.real_coord[x][y][1]

            if (self.kinect.currentDepthFrame.any() != 0):
                z = self.kinect.currentDepthFrame[y][x]
                real_z = self.kinect.convertDepthtomm(z)
                self.ui.rdoutMousePixels.setText("(%.0f,%.0f,%.0f)" %
                                                 (x, y, z))
                if self.kinect.kinectCalibrated == True:
                    self.ui.rdoutMouseWorld.setText("(%.0f,%.0f,%.0f)" %
                                                    (real_x, real_y, real_z))
                else:
                    self.ui.rdoutMouseWorld.setText("(-,-,-)")

    def mousePressEvent(self, QMouseEvent):
        """Record mouse-click positions (image coordinates) for calibration."""
        x = QMouseEvent.x()
        y = QMouseEvent.y()
        # Ignore clicks outside the camera image. NOTE(review): uses > where
        # trackMouse uses >= for the upper bound -- confirm intended.
        if ((x < MIN_X) or (x > MAX_X) or (y < MIN_Y) or (y > MAX_Y)): return
        # Change coordinates to image axis.
        self.kinect.last_click[0] = x - MIN_X
        self.kinect.last_click[1] = y - MIN_Y
        self.kinect.new_click = True
        #print(self.kinect.last_click)

    def shutdown(self):
        """Shut the arm down (torque off / cleanup handled by Rexarm)."""
        self.rexarm.shutdown()

    def closeEvent(self, *args, **kwargs):
        """Qt close hook: let Qt close the window, then shut down the arm."""
        # Fix: super(Gui, self). The original super(QMainWindow, self)
        # skipped QMainWindow in the MRO and dispatched to QWidget.closeEvent.
        super(Gui, self).closeEvent(*args, **kwargs)
        print("EXITING...")
        self.shutdown()
Ejemplo n.º 45
0
    def __init__(self,parent=None):
        """Build the GUI: hardware objects, button/slider wiring, threads.

        Fix: the original body mixed tab- and space-indented lines (the
        btn_exec/btn_task/btnUser blocks were tab-indented), which is a
        TabError under Python 3. All lines are normalized to 4-space
        indentation; the statements themselves are unchanged.
        """
        QWidget.__init__(self,parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        # Track the mouse even with no button pressed (needed by trackMouse).
        QWidget.setMouseTracking(self,True)

        # Dynamixel bus -- add other motors here.
        self.dxlbus = DXL_BUS(DEVICENAME, BAUDRATE)
        port_num = self.dxlbus.port()
        base = DXL_MX(port_num, 0)
        shld = DXL_MX(port_num, 1)
        elbw = DXL_MX(port_num, 2)
        wrst = DXL_AX(port_num, 3)
        wrst2 = DXL_XL(port_num, 4) # NIC 10/4
        grip = DXL_XL(port_num, 5) # NIC 10/4

        # Core objects: camera, arm, planner, state machine.
        self.kinect = Kinect()
        self.rexarm = Rexarm((base,shld,elbw,wrst,wrst2),grip) # NIC 10/4
        self.tp = TrajectoryPlanner(self.rexarm)
        self.sm = StateMachine(self.rexarm, self.tp, self.kinect)

        # --- Attach functions to buttons & sliders ---------------------
        self.ui.btn_estop.clicked.connect(self.estop)
        self.ui.btn_exec.clicked.connect(partial(self.sm.set_next_state,"execute_plan"))
        self.ui.btn_task1.clicked.connect(partial(self.sm.set_next_state,"task_1"))
        self.ui.btn_task2.clicked.connect(partial(self.sm.set_next_state,"task_2"))
        self.ui.btn_task4.clicked.connect(partial(self.sm.set_next_state,"task_5"))
        #self.ui.btn_exec.clicked.connect(partial(self.sm.set_next_state,"execute"))
        self.ui.btnUser1.setText("Calibrate")
        self.ui.btnUser1.clicked.connect(partial(self.sm.set_next_state, "calibrate"))
        self.ui.btnUser2.setText("Record Waypoints")
        self.ui.btnUser2.clicked.connect(partial(self.sm.set_next_state, "record"))
        self.ui.btnUser3.setText("Play")
        self.ui.btnUser3.clicked.connect(partial(self.sm.set_next_state, "play"))
        self.ui.btnUser4.setText("Open Gripper") # NIC 10/4
        self.ui.btnUser4.clicked.connect(self.rexarm.open_gripper) # NIC 10/4
        self.ui.btnUser5.setText("Close Gripper") # NIC 10/4
        self.ui.btnUser5.clicked.connect(self.rexarm.close_gripper) # NIC 10/4
        self.ui.sldrBase.valueChanged.connect(self.sliderChange)
        self.ui.sldrShoulder.valueChanged.connect(self.sliderChange)
        self.ui.sldrElbow.valueChanged.connect(self.sliderChange)
        self.ui.sldrWrist.valueChanged.connect(self.sliderChange)
        self.ui.sldrGrip1.valueChanged.connect(self.sliderChange)
        self.ui.sldrGrip2.valueChanged.connect(self.sliderChange)
        self.ui.sldrMaxTorque.valueChanged.connect(self.sliderChange)
        self.ui.sldrSpeed.valueChanged.connect(self.sliderChange)
        self.ui.chk_directcontrol.stateChanged.connect(self.directControlChk)
        self.ui.rdoutStatus.setText("Waiting for input")

        # Manual (slider) control starts disabled.
        self.ui.SliderFrame.setEnabled(False)

        # Bring the arm online.
        self.rexarm.initialize()

        # Worker threads: video feed, state-machine logic, readout display.
        self.videoThread = VideoThread(self.kinect)
        self.videoThread.updateFrame.connect(self.setImage)
        self.videoThread.start()

        self.logicThread = LogicThread(self.sm)
        self.logicThread.start()

        self.displayThread = DisplayThread(self.rexarm, self.sm)
        self.displayThread.updateJointReadout.connect(self.updateJointReadout)
        self.displayThread.updateEndEffectorReadout.connect(self.updateEndEffectorReadout)
        self.displayThread.updateStatusMessage.connect(self.updateStatusMessage)
        self.displayThread.start()

        # Timer: run trackMouse every 50 ms.
        self._timer = QTimer(self)
        self._timer.timeout.connect(self.trackMouse)
        self._timer.start(50)
Ejemplo n.º 46
0
    def __init__(self, parent=None):
        """Build the main window: hardware, state machine, UI wiring, threads."""
        QWidget.__init__(self, parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Track the mouse even when no button is pressed.
        QWidget.setMouseTracking(self, True)

        # Dynamixel bus and the six servos of the arm.
        self.dxlbus = DXL_BUS(DEVICENAME, BAUDRATE)
        bus_port = self.dxlbus.port()
        joint_servos = (
            DXL_MX(bus_port, 0),   # base
            DXL_MX(bus_port, 1),   # shoulder
            DXL_MX(bus_port, 2),   # elbow
            DXL_AX(bus_port, 3),   # wrist
            DXL_XL(bus_port, 4),   # wrist rotate
        )
        gripper_servo = DXL_XL(bus_port, 5)

        # Core objects: arm, camera, planner, state machine.
        self.rexarm = Rexarm(joint_servos, gripper_servo)
        self.kinect = Kinect(self.rexarm)
        self.tp = TrajectoryPlanner(self.rexarm)
        self.sm = StateMachine(self.rexarm, self.tp, self.kinect)

        # --- UI wiring --------------------------------------------------
        self.ui.btn_estop.clicked.connect(self.estop)

        self.ui.btnUser1.setText("Calibrate")
        self.ui.btnUser1.clicked.connect(
            partial(self.sm.set_next_state, "calibrate"))

        # Every joint/gripper/torque/speed slider shares one handler.
        for slider in (self.ui.sldrBase, self.ui.sldrShoulder,
                       self.ui.sldrElbow, self.ui.sldrWrist,
                       self.ui.sldrGrip1, self.ui.sldrGrip2,
                       self.ui.sldrMaxTorque, self.ui.sldrSpeed):
            slider.valueChanged.connect(self.sliderChange)

        # Gripper open (btnUser5) / close (btnUser6) buttons.
        self.ui.btnUser5.clicked.connect(self.btn2clicked)
        self.ui.btnUser6.clicked.connect(self.btn1clicked)

        self.ui.chk_directcontrol.stateChanged.connect(self.directControlChk)
        self.ui.rdoutStatus.setText("Waiting for input")
        self.ui.btn_exec.clicked.connect(
            partial(self.sm.set_next_state, "execute"))

        # Buttons whose only job is to request a state-machine transition,
        # together with their labels.
        labelled_state_buttons = (
            (self.ui.btnUser2, "Record wp", "add_wp"),
            (self.ui.btnUser3, "Clear wp", "clear_wp"),
            (self.ui.btnUser4, "Click and pick", "click_and_pick"),
            (self.ui.btnUser7, "Save Calibration Points",
             "save_calibration_points"),
            (self.ui.btnUser8, "Load Previous Calibration",
             "load_previous_calibration"),
            (self.ui.btnUser9, "Record Block Position",
             "record_block_position"),
        )
        for button, label, state in labelled_state_buttons:
            button.setText(label)
            button.clicked.connect(partial(self.sm.set_next_state, state))

        self.ui.btnUser5.setText("Open Gripper")
        self.ui.btnUser6.setText("Close Gripper")

        # Competition-task buttons.
        for button, state in ((self.ui.btn_task1, "mirror"),
                              (self.ui.btn_task2, "stack_3"),
                              (self.ui.btn_task3, "line_em_up"),
                              (self.ui.btn_task4, "stack_em_high"),
                              (self.ui.btn_exec_6, "pyramid5")):
            button.clicked.connect(partial(self.sm.set_next_state, state))

        # Manual (slider) control starts disabled.
        self.ui.SliderFrame.setEnabled(False)

        # Bring the arm online.
        self.rexarm.initialize()

        # Worker threads: video feed, state-machine logic, readout display.
        self.videoThread = VideoThread(self.kinect)
        self.videoThread.updateFrame.connect(self.setImage)
        self.videoThread.start()

        self.logicThread = LogicThread(self.sm)
        self.logicThread.start()

        self.displayThread = DisplayThread(self.rexarm, self.sm)
        self.displayThread.updateJointReadout.connect(self.updateJointReadout)
        self.displayThread.updateEndEffectorReadout.connect(
            self.updateEndEffectorReadout)
        self.displayThread.updateStatusMessage.connect(
            self.updateStatusMessage)
        self.displayThread.start()

        # Poll the mouse position every 50 ms.
        self._timer = QTimer(self)
        self._timer.timeout.connect(self.trackMouse)
        self._timer.start(50)
Ejemplo n.º 47
0
    def __init__(self,parent=None):
        """Build the GUI: seven-servo arm, Kinect, UI wiring, worker threads.

        Statement order matters here: hardware objects are created first,
        the arm is initialized, and only then are the threads started.
        """
        QWidget.__init__(self,parent)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)

        """ Set GUI to track mouse """
        QWidget.setMouseTracking(self,True)

        """
        Dynamixel bus
        TODO: add other motors here as needed with their correct address"""
        self.dxlbus = DXL_BUS(DEVICENAME, BAUDRATE)
        port_num = self.dxlbus.port()
        # Seven servos: six arm joints plus a gripper (bus IDs 1-7).
        base = DXL_MX(port_num, 1)
        shld = DXL_MX(port_num, 2)
        elbw = DXL_MX(port_num, 3)
        wrst = DXL_AX(port_num, 4)
        wrst2 = DXL_AX(port_num, 5)
        wrst3 = DXL_XL(port_num, 6)
        gripper = DXL_XL(port_num, 7)

        """Objects Using Other Classes"""
        self.kinect = Kinect()
        self.rexarm = Rexarm((base,shld,elbw,wrst,wrst2,wrst3),gripper)
        self.tp = TrajectoryPlanner(self.rexarm)
        self.sm = StateMachine(self.rexarm, self.tp, self.kinect)

        """ 
        Attach Functions to Buttons & Sliders
        TODO: NAME AND CONNECT BUTTONS AS NEEDED
        """
        # self.execute is defined elsewhere in the enclosing class.
        self.ui.btn_exec.clicked.connect(self.execute)

        self.ui.btn_estop.clicked.connect(self.estop)

        self.ui.btnUser1.setText("Calibrate")
        self.ui.btnUser1.clicked.connect(partial(self.sm.set_next_state, "calibrate"))

        # All joint sliders share a single handler.
        self.ui.sldrBase.valueChanged.connect(self.sliderChange)
        self.ui.sldrShoulder.valueChanged.connect(self.sliderChange)
        self.ui.sldrElbow.valueChanged.connect(self.sliderChange)
        self.ui.sldrWrist.valueChanged.connect(self.sliderChange)
        self.ui.sldrWrist2.valueChanged.connect(self.sliderChange)
        self.ui.sldrWrist3.valueChanged.connect(self.sliderChange)
        #self.ui.sldrGrip1.valueChanged.connect(self.sliderChange)

        self.ui.sldrMaxTorque.valueChanged.connect(self.sliderChange)
        self.ui.sldrSpeed.valueChanged.connect(self.sliderChange)
        self.ui.chk_directcontrol.stateChanged.connect(self.directControlChk)
        self.ui.rdoutStatus.setText("Waiting for input")

        """Team10 section for buttons"""
        # Buttons that simply request a state-machine transition.
        self.ui.btnUser2.setText("teach")
        self.ui.btnUser2.clicked.connect(partial(self.sm.set_next_state, "teach"))

        self.ui.btnUser3.setText("repeat")
        self.ui.btnUser3.clicked.connect(partial(self.sm.set_next_state, "repeat"))

        self.ui.btnUser4.setText("Set ROI")
        self.ui.btnUser4.clicked.connect(partial(self.sm.set_next_state, "set_roi"))

        self.ui.btnUser5.setText("Set Exclusion")
        self.ui.btnUser5.clicked.connect(partial(self.sm.set_next_state, "set_exclusion"))

        self.ui.btnUser6.setText("Save frames")
        self.ui.btnUser6.clicked.connect(partial(self.sm.set_next_state, "save_frames"))

        self.ui.btn_task3.clicked.connect(partial(self.sm.set_next_state, "task3"))

        self.ui.btnUser7.setText("Click & Grab")
        self.ui.btnUser7.clicked.connect(partial(self.sm.set_next_state, "ClickandGrab"))

        """initalize manual control off"""
        self.ui.SliderFrame.setEnabled(False)

        """initalize rexarm"""
        self.rexarm.initialize()

        """Setup Threads"""
        # Worker threads: video feed, state-machine logic, readout display.
        self.videoThread = VideoThread(self.kinect)
        self.videoThread.updateFrame.connect(self.setImage)
        self.videoThread.start()

        self.logicThread = LogicThread(self.sm)
        self.logicThread.start()

        self.displayThread = DisplayThread(self.rexarm, self.sm)
        self.displayThread.updateJointReadout.connect(self.updateJointReadout)
        self.displayThread.updateEndEffectorReadout.connect(self.updateEndEffectorReadout)
        self.displayThread.updateStatusMessage.connect(self.updateStatusMessage)
        self.displayThread.start()

        """ 
        Setup Timer 
        this runs the trackMouse function every 50ms
        """
        self._timer = QTimer(self)
        self._timer.timeout.connect(self.trackMouse)
        self._timer.start(50)
Ejemplo n.º 48
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import cv2
import numpy as np
import config

from kinect import Kinect
from body import BodyDetector
from hand import HandDetector, HandContourDetector
from pose import PoseClassifier, OpenCloseClassifier, MultiPoseClassifier


# Build the detection pipeline: camera source, body segmentation, hand
# extraction, hand-contour detection, and pose classification.
kinect = Kinect()
body = BodyDetector()
hand = HandDetector()
contour = HandContourDetector()
# pose   = PoseClassifier(OpenCloseClassifier())
pose = PoseClassifier(MultiPoseClassifier())


for (depth, depth8, rgb) in kinect.get_data():

    b = body.run(depth8)
    (h, _) = hand.run(b)

    # cv2.imshow('hand', h)

    (ccc, box, hc) = contour.run(h)

    if len(ccc) < 100: