Example 1
    def actAndStore(self, action):
        if self.rs.det:
            realU = muscleFilter(action)
            #computation of the arm state
            realNextState = self.arm.computeNextState(realU, self.arm.getState())

            #computation of the approximated state
            tmpState = self.arm.getState()

            estimNextState = realNextState
        else:
            #realU = getNoisyCommand(U,self.arm.getMusclesParameters().getKnoiseU())
            realU = getNoisyCommand(action, self.arm.musclesP.knoiseU)
            realU = muscleFilter(realU)

            #computation of the arm state
            realNextState = self.arm.computeNextState(realU, self.arm.getState())

            #computation of the approximated state
            tmpState = self.arm.getState()

            action = muscleFilter(action)
            estimNextState = self.stateEstimator.getEstimState(tmpState, action)

        #print estimNextState

        self.arm.setState(realNextState)

        #computation of the cost

        #get dotq and q from the state vector
        _, q = self.arm.getDotQAndQFromStateVector(tmpState)
        coordElbow, self.coordHand = self.arm.mgdFull(q)
        cost = self.trajCost.computeStateTransitionCost(realU, self.coordHand)
        
        stepStore = []
        stepStore.append(self.vectarget)
        stepStore.append(self.estimState)
        stepStore.append(tmpState)
        stepStore.append(realU)
        stepStore.append(action)
        stepStore.append(estimNextState)
        stepStore.append(realNextState)
        stepStore.append([coordElbow[0], coordElbow[1]])
        stepStore.append([self.coordHand[0], self.coordHand[1]])
        #print ("before",stepStore)
        tmpstore = np.array(stepStore).flatten()
        row = [item for sub in tmpstore for item in sub]
        #print ("store",row)
        self.dataStore.append(row)
        
        self.i += 1
        self.t += self.rs.dt
        self.estimState = estimNextState
        return [realU], [cost]
Example 2
    def actAndStore(self, action):
        if self.rs.det:
            realU = muscleFilter(action)
            #computation of the arm state
            realNextState = self.arm.computeNextState(realU,
                                                      self.arm.getState())

            #computation of the approximated state
            tmpState = self.arm.getState()

            estimNextState = realNextState
        else:
            #realU = getNoisyCommand(U,self.arm.getMusclesParameters().getKnoiseU())
            realU = getNoisyCommand(action, self.arm.musclesP.knoiseU)
            realU = muscleFilter(realU)

            #computation of the arm state
            realNextState = self.arm.computeNextState(realU,
                                                      self.arm.getState())

            #computation of the approximated state
            tmpState = self.arm.getState()

            action = muscleFilter(action)
            estimNextState = self.stateEstimator.getEstimState(
                tmpState, action)

        #print estimNextState

        self.arm.setState(realNextState)

        #computation of the cost

        #get dotq and q from the state vector
        _, q = self.arm.getDotQAndQFromStateVector(tmpState)
        coordElbow, self.coordHand = self.arm.mgdFull(q)
        cost = self.trajCost.computeStateTransitionCost(realU, self.coordHand)

        stepStore = []
        stepStore.append(self.vectarget)
        stepStore.append(self.estimState)
        stepStore.append(tmpState)
        stepStore.append(realU)
        stepStore.append(action)
        stepStore.append(estimNextState)
        stepStore.append(realNextState)
        stepStore.append([coordElbow[0], coordElbow[1]])
        stepStore.append([self.coordHand[0], self.coordHand[1]])
        #print ("before",stepStore)
        tmpstore = np.array(stepStore).flatten()
        row = [item for sub in tmpstore for item in sub]
        #print ("store",row)
        self.dataStore.append(row)

        self.i += 1
        self.t += self.rs.dt
        self.estimState = estimNextState
        return [realU], [cost]
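
A minimal driver sketch for actAndStore, assuming a hypothetical env object that owns the method and the attributes used above (estimState, coordHand, rs.YTarget, rs.maxSteps) plus an assumed controller with computeOutput, as in the later examples; every name here is illustrative, not part of the original code:

# Hypothetical episode loop (sketch): step until the hand crosses the
# target line or the step budget runs out; env and controller are assumptions.
totalCost = 0.0
for _ in range(env.rs.maxSteps):
    action = controller.computeOutput(env.estimState)  # assumed controller
    [realU], [cost] = env.actAndStore(action)
    totalCost += cost
    if env.coordHand[1] >= env.rs.YTarget:  # target line crossed
        break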
Example 3
    def __act__(self, action):
        if self.rs.det:
            realU = muscleFilter(action)
            #computation of the arm state
            realNextState = self.arm.computeNextState(realU,
                                                      self.arm.getState())

            #computation of the approximated state
            tmpState = self.arm.getState()

            estimNextState = realNextState
        else:
            #realU = getNoisyCommand(U,self.arm.getMusclesParameters().getKnoiseU())
            realU = getNoisyCommand(action, self.arm.musclesP.knoiseU)
            realU = muscleFilter(realU)

            #computation of the arm state
            realNextState = self.arm.computeNextState(realU,
                                                      self.arm.getState())

            #computation of the approximated state
            tmpState = self.arm.getState()

            action = muscleFilter(action)
            estimNextState = self.stateEstimator.getEstimState(
                tmpState, action)

        #print estimNextState

        self.arm.setState(realNextState)

        #computation of the cost

        #get dotq and q from the state vector
        _, q = self.arm.getDotQAndQFromStateVector(tmpState)
        self.coordHand = self.arm.mgdEndEffector(q)
        costAct = self.trajCost.computeStateTransitionCost(
            realU, self.coordHand)
        #print ("dotq :",dotq)
        #computation of the coordinates to check whether the target is reached
        self.i += 1
        self.t += self.rs.dt
        self.estimState = estimNextState
        if (self.coordHand[1] >= self.rs.YTarget
                or self.i >= self.rs.maxSteps):
            costAct += self.trajCost.computeFinalReward(
                self.arm, self.t, self.coordHand, self.sizeOfTarget)
        self.cost += costAct
        return [realU], [costAct]
Example 4
    def __act__(self, action):
        if self.rs.det:
            realU = muscleFilter(action)
            #computation of the arm state
            realNextState = self.arm.computeNextState(realU, self.arm.getState())

            #computation of the approximated state
            tmpState = self.arm.getState()

            estimNextState = realNextState
        else:
            #realU = getNoisyCommand(U,self.arm.getMusclesParameters().getKnoiseU())
            realU = getNoisyCommand(action, self.arm.musclesP.knoiseU)
            realU = muscleFilter(realU)

            #computation of the arm state
            realNextState = self.arm.computeNextState(realU, self.arm.getState())

            #computation of the approximated state
            tmpState = self.arm.getState()

            action = muscleFilter(action)
            estimNextState = self.stateEstimator.getEstimState(tmpState, action)

        #print estimNextState

        self.arm.setState(realNextState)

        #computation of the cost

        #get dotq and q from the state vector
        _, q = self.arm.getDotQAndQFromStateVector(tmpState)
        self.coordHand = self.arm.mgdEndEffector(q)
        costAct = self.trajCost.computeStateTransitionCost(realU, self.coordHand)
        #print ("dotq :",dotq)
        #computation of the coordinates to check whether the target is reached
        self.i += 1
        self.t += self.rs.dt
        self.estimState = estimNextState
        if self.coordHand[1] >= self.rs.YTarget or self.i >= self.rs.maxSteps:
            costAct += self.trajCost.computeFinalReward(self.arm, self.t, self.coordHand, self.sizeOfTarget)
        self.cost += costAct
        return [realU], [costAct]
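
A comparable rollout sketch for __act__, under the same assumptions; unlike actAndStore, __act__ adds the final reward to costAct on the terminating step and accumulates into self.cost, so the caller only needs to detect termination:

# Hypothetical rollout (sketch): __act__ folds the terminal reward into
# costAct itself; env and controller are assumptions, as above.
env.cost = 0.0
for _ in range(env.rs.maxSteps):
    action = controller.computeOutput(env.estimState)
    env.__act__(action)
    if env.coordHand[1] >= env.rs.YTarget:
        break
# env.cost now holds the accumulated trajectory cost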
Example 5
    def runTrajectory(self, x, y, foldername):
        '''
        Generates a trajectory from the initial position (x, y).

        Inputs:     -x: abscissa of the initial position, float
                    -y: ordinate of the initial position, float

        Outputs:    -cost: the cost of the trajectory, float
                    -t: duration of the trajectory, float
                    -lastX: last abscissa of the hand, float
        '''
        #computes the articular position q1, q2 from the initial coordinates (x, y)
        q1, q2 = self.arm.mgi(x, y)
        #creates the state vector [dotq1, dotq2, q1, q2]
        q = createVector(q1, q2)
        state = np.array([0., 0., q1, q2])
        #print("start state --------------: ",state)

        #computes the coordinates of the hand and the elbow from the position vector
        coordElbow, coordHand = self.arm.mgdFull(q)
        #assert(coordHand[0]==x and coordHand[1]==y), "MGD error"; disabled because of rounding effects

        #initializes parameters for the trajectory
        i, t, cost = 0, 0, 0
        self.stateEstimator.initStore(state)
        self.arm.setState(state)
        estimState = state
        dataStore = []
        qtarget1, qtarget2 = self.arm.mgi(self.rs.XTarget, self.rs.YTarget)
        vectarget = [0.0, 0.0, qtarget1, qtarget2]

        #loop to generate next position until the target is reached 
        while coordHand[1] < self.rs.YTarget and i < self.rs.numMaxIter:
            stepStore = []
            #computation of the next muscular activation vector using the controller theta
            #print ("state :",self.arm.getState())

            U = self.controller.computeOutput(estimState)

            if self.rs.det:
                Unoisy = muscleFilter(U)
            else:
                Unoisy = getNoisyCommand(U, self.arm.musclesP.knoiseU)
                Unoisy = muscleFilter(Unoisy)
            #computation of the arm state
            realNextState = self.arm.computeNextState(Unoisy, self.arm.getState())
 
            #computation of the approximated state
            tmpState = self.arm.getState()

            if self.rs.det:
                estimNextState = realNextState
            else:
                U = muscleFilter(U)
                estimNextState = self.stateEstimator.getEstimState(tmpState, U)

            #print estimNextState

            self.arm.setState(realNextState)

            #computation of the cost
            cost += self.computeStateTransitionCost(Unoisy)

            '''
            print "U =", U
            print "Unoisy =", Unoisy
            print "estimstate =", estimState
            #print "theta =", self.controller.theta
            if math.isnan(cost):
                print "NAN : U =", U
                print "NAN : Unoisy =", Unoisy
                print "NAN : estimstate =", estimState
                #print "NAN : theta =", self.controller.theta
                sys.exit()
            '''

            #get dotq and q from the state vector
            dotq, q = getDotQAndQFromStateVector(tmpState)
            coordElbow, coordHand = self.arm.mgdFull(q)
            #print ("dotq :",dotq)
            #computation of the coordinates to check whether the target is reached
            #code to save data of the trajectory

            #Note: these structures might be much improved
            if self.saveTraj:
                stepStore.append(vectarget)
                stepStore.append(estimState)
                stepStore.append(tmpState)
                stepStore.append(Unoisy)
                stepStore.append(np.array(U))
                stepStore.append(estimNextState)
                stepStore.append(realNextState)
                stepStore.append([coordElbow[0], coordElbow[1]])
                stepStore.append([coordHand[0], coordHand[1]])
                #print ("before",stepStore)
                tmpstore = np.array(stepStore).flatten()
                row = [item for sub in tmpstore for item in sub]
                #print ("store",row)
                dataStore.append(row)

            estimState = estimNextState
            i += 1
            t += self.rs.dt

        cost += self.computeFinalReward(t, coordHand)

        if self.saveTraj:
            filename = findFilename(foldername + "Log/", "traj", ".log")
            np.savetxt(filename, dataStore)
            '''
            if coordHand[0] >= -self.sizeOfTarget/2 and coordHand[0] <= self.sizeOfTarget/2 and coordHand[1] >= self.rs.YTarget and i<230:
                foldername = pathDataFolder + "TrajRepository/"
                name = findFilename(foldername,"Traj",".traj")
                np.savetxt(name,dataStore)
            '''

        lastX = -1000 #used to ignore dispersion when the target line is not crossed
        if coordHand[1] >= self.rs.YTarget:
            lastX = coordHand[0]
        #print "end of trajectory"
        return cost, t, lastX
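
The .log file written with np.savetxt can be reloaded with np.loadtxt; the column order follows the append order of stepStore (target vector, estimated state, real state, noisy command, filtered command, estimated and real next states, elbow and hand coordinates). A minimal sketch, assuming only that the path matches the one built by findFilename and that each coordinate pair occupies two columns at the end of each row:

import numpy as np

# Sketch: reload a saved trajectory log; per the append order in
# runTrajectory, the last two columns are the hand coordinates and the
# two before them the elbow coordinates.
data = np.loadtxt("Log/traj.log")  # placeholder path
elbowXY = data[:, -4:-2]
handXY = data[:, -2:]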
Example 6
    def runTrajectoryOpti(self, x, y):
        '''
        Generates a trajectory from the initial position (x, y); used to plot
        trajectories without saving them.

        Inputs:     -x: abscissa of the initial position, float
                    -y: ordinate of the initial position, float

        Outputs:    -cost: the cost of the trajectory, float
                    -t: duration of the trajectory, float
                    -lastX: last abscissa of the hand, float
        '''
        #computes the articular position q1, q2 from the initial coordinates (x, y)
        q1, q2 = self.arm.mgi(x, y)
        #creates the state vector [dotq1, dotq2, q1, q2]
        q = createVector(q1, q2)
        state = np.array([0., 0., q1, q2])
        #print("start state --------------: ",state)

        #computes the coordinates of the hand and the elbow from the position vector
        coordHand = self.arm.mgdEndEffector(q)
        #assert(coordHand[0]==x and coordHand[1]==y), "MGD error"; disabled because of rounding effects

        #initializes parameters for the trajectory
        i, t, cost = 0, 0, 0
        self.stateEstimator.initStore(state)
        self.arm.setState(state)
        estimState = state

        #loop to generate next position until the target is reached 
        while coordHand[1] < self.rs.YTarget and i < self.rs.maxSteps:
            #computation of the next muscular activation vector using the controller theta
            #print ("state :",self.arm.getState())
            U = self.controller.computeOutput(estimState)

            if self.rs.det:
                realU = muscleFilter(U)
                #computation of the arm state
                realNextState = self.arm.computeNextState(realU, self.arm.getState())

                #computation of the approximated state
                tmpState = self.arm.getState()

                estimNextState = realNextState
            else:
                #realU = getNoisyCommand(U,self.arm.getMusclesParameters().getKnoiseU())
                realU = getNoisyCommand(U, self.arm.musclesP.knoiseU)
                realU = muscleFilter(realU)

                #computation of the arm state
                realNextState = self.arm.computeNextState(realU, self.arm.getState())

                #computation of the approximated state
                tmpState = self.arm.getState()

                U = muscleFilter(U)
                estimNextState = self.stateEstimator.getEstimState(tmpState, U)

            #print estimNextState

            self.arm.setState(realNextState)

            #computation of the cost
            cost += self.trajCost.computeStateTransitionCost(realU)
            #get dotq and q from the state vector
            _, q = self.arm.getDotQAndQFromStateVector(tmpState)
            coordHand = self.arm.mgdEndEffector(q)
            #print ("dotq :",dotq)
            #computation of the coordinates to check whether the target is reached

            estimState = estimNextState
            i += 1
            t += self.rs.dt

        cost += self.trajCost.computeFinalReward(self.arm, t, coordHand, self.sizeOfTarget)
        lastX = -1000 #used to ignore dispersion when the target line is not crossed
        if coordHand[1] >= self.rs.YTarget:
            lastX = coordHand[0]
        return cost, t, lastX
Example 7
    def runTrajectoryOpti(self, x, y):
        '''
        Generates a trajectory from the initial position (x, y); used to plot
        trajectories without saving them.

        Inputs:     -x: abscissa of the initial position, float
                    -y: ordinate of the initial position, float

        Outputs:    -cost: the cost of the trajectory, float
                    -t: duration of the trajectory, float
                    -lastX: last abscissa of the hand, float
        '''
        #computes the articular position q1, q2 from the initial coordinates (x, y)
        q1, q2 = self.arm.mgi(x, y)
        #creates the state vector [dotq1, dotq2, q1, q2]
        q = createVector(q1, q2)
        state = np.array([0., 0., q1, q2])
        #print("start state --------------: ",state)

        #computes the coordinates of the hand and the elbow from the position vector
        coordHand = self.arm.mgdEndEffector(q)
        #assert(coordHand[0]==x and coordHand[1]==y), "MGD error"; disabled because of rounding effects

        #initializes parameters for the trajectory
        i, t, cost = 0, 0, 0
        self.stateEstimator.initStore(state)
        self.arm.setState(state)
        estimState = state

        #loop to generate next position until the target is reached
        while coordHand[1] < self.rs.YTarget and i < self.rs.maxSteps:
            #computation of the next muscular activation vector using the controller theta
            #print ("state :",self.arm.getState())
            U = self.controller.computeOutput(estimState)

            if self.rs.det:
                realU = muscleFilter(U)
                #computation of the arm state
                realNextState = self.arm.computeNextState(
                    realU, self.arm.getState())

                #computation of the approximated state
                tmpState = self.arm.getState()

                estimNextState = realNextState
            else:
                #realU = getNoisyCommand(U,self.arm.getMusclesParameters().getKnoiseU())
                realU = getNoisyCommand(U, self.arm.musclesP.knoiseU)
                realU = muscleFilter(realU)

                #computation of the arm state
                realNextState = self.arm.computeNextState(
                    realU, self.arm.getState())

                #computation of the approximated state
                tmpState = self.arm.getState()

                U = muscleFilter(U)
                estimNextState = self.stateEstimator.getEstimState(tmpState, U)

            #print estimNextState

            self.arm.setState(realNextState)

            #computation of the cost
            cost += self.trajCost.computeStateTransitionCost(realU)
            #get dotq and q from the state vector
            _, q = self.arm.getDotQAndQFromStateVector(tmpState)
            coordHand = self.arm.mgdEndEffector(q)
            #print ("dotq :",dotq)
            #computation of the coordinates to check whether the target is reached

            estimState = estimNextState
            i += 1
            t += self.rs.dt

        cost += self.trajCost.computeFinalReward(self.arm, t, coordHand,
                                                 self.sizeOfTarget)
        lastX = -1000  #used to ignore dispersion when the target line is not crossed
        if coordHand[1] >= self.rs.YTarget:
            lastX = coordHand[0]
        return cost, t, lastX
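
A sketch of sweeping runTrajectoryOpti over a few start points to compare costs and end points; the traj instance name and the start coordinates are placeholders:

# Hypothetical sweep (sketch): evaluate several initial hand positions.
results = []
for (x, y) in [(0.0, 0.3), (0.05, 0.3), (0.1, 0.3)]:  # placeholder starts
    cost, t, lastX = traj.runTrajectoryOpti(x, y)  # traj: assumed instance
    results.append((x, y, cost, t, lastX))
# lastX stays at -1000 for runs that never cross the target line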