Example #1
    def saveOneTraj(self, x, y):
        #computes the articular position q1, q2 from the initial coordinates (x, y)
        q1, q2 = self.arm.mgi(x, y)
        #creates the state vector [dotq1, dotq2, q1, q2]
        q = createVector(q1, q2)
        state = np.array([0., 0., q1, q2])
        #print("start state --------------: ",state)

        #computes the coordinates of the hand (end effector) from the joint position vector
        self.coordHand = self.arm.mgdEndEffector(q)
        #assert(coordHand[0] == x and coordHand[1] == y), "MGD error"  # disabled: fails because of rounding effects

        #initializes parameters for the trajectory
        self.i, self.t, self.cost = 0, 0, 0
        self.stateEstimator.initStore(state)
        self.arm.setState(state)
        self.estimState = state
        
        self.dataStore = []
        self.vectarget = [0.0, 0.0, self.rs.XTarget, self.rs.YTarget]
        totalCost = 0
        while not self.isFinished():
            action = self.actor.action(self.estimState)
            _, cost = self.actAndStore(action)
            totalCost += cost[0]
        totalCost += self.trajCost.computeFinalReward(self.arm, self.t, self.coordHand, self.sizeOfTarget)
        filename = findDataFilename(self.saveName + "Log/", "traj" + str(x) + "-" + str(y), ".log")
        np.savetxt(filename, self.dataStore)
        
        #used to ignore dispersion when the target line is not crossed
        if self.coordHand[1] >= self.rs.YTarget:
            self.lastCoord.append(self.coordHand[0])
            
        return totalCost, self.t
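
saveOneTraj relies on the arm's inverse kinematics (mgi, hand coordinates to joint angles) and forward kinematics (mgdEndEffector, joint angles to hand coordinates). A minimal sketch of what these calls compute for a planar two-link arm, assuming hypothetical link lengths l1 and l2 (the project's Arm26 class may use different conventions):

import numpy as np

def mgd_end_effector(q, l1=0.3, l2=0.35):
    # forward kinematics (MGD): joint angles -> hand coordinates
    q1, q2 = q
    x = l1 * np.cos(q1) + l2 * np.cos(q1 + q2)
    y = l1 * np.sin(q1) + l2 * np.sin(q1 + q2)
    return np.array([x, y])

def mgi(x, y, l1=0.3, l2=0.35):
    # inverse kinematics (MGI): hand coordinates -> joint angles (one elbow configuration)
    c2 = (x * x + y * y - l1 * l1 - l2 * l2) / (2.0 * l1 * l2)
    q2 = np.arccos(np.clip(c2, -1.0, 1.0))
    q1 = np.arctan2(y, x) - np.arctan2(l2 * np.sin(q2), l1 + l2 * np.cos(q2))
    return q1, q2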
Example #2
def plotEstimator(setupFile, target_size, thetaName, x, y):
    arm = ArmType["Arm26"]()
    
    # deterministic trajectory (det = True) with the "Inv" state estimator
    setupFile.det = True
    tm = TrajMaker(setupFile, target_size, False, thetaName, "Inv")
    state1, _, _, _ = tm.runTrajectoryForPlot(x, y)

    # stochastic trajectory (det = False), same TrajMaker and estimator
    setupFile.det = False
    state2, _, _, _ = tm.runTrajectoryForPlot(x, y)

    # stochastic trajectory without a state estimator ("No")
    tm = TrajMaker(setupFile, target_size, False, thetaName, "No")
    state3, _, _, _ = tm.runTrajectoryForPlot(x, y)
    
    plt.figure(1, figsize=(16, 9))
    
    hand = []
    for state in state1:
        hand.append(arm.mgdEndEffector(state[2:]))
    hand = np.array(hand)
    plt.plot(hand[:, 0], hand[:, 1], color="blue")
    
    hand = []
    for state in state2:
        hand.append(arm.mgdEndEffector(state[2:]))
    hand = np.array(hand)
    plt.plot(hand[:, 0], hand[:, 1], color="red")
    
    hand = []
    for state in state3:
        hand.append(arm.mgdEndEffector(state[2:]))
    hand = np.array(hand)
    plt.plot(hand[:, 0], hand[:, 1], color="green")
    
    plt.xlabel("X (m)")
    plt.ylabel("Y (m)")
    imageFolder = setupFile.OPTIpath + "/ImageBank/"
    name = findDataFilename(imageFolder, "Estimation", ".svg")
    plt.savefig(name, bbox_inches='tight')
    plt.show(block=True)
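
The three hand-path blocks above differ only in the list of states and the line color. A behavior-equivalent refactor (which also adds a legend the original does not draw; the labels reflect the det flag and the estimator argument and are only descriptive guesses) could look like:

runs = ((state1, "blue", "det, Inv estimator"),
        (state2, "red", "noisy, Inv estimator"),
        (state3, "green", "noisy, no estimator"))
for states, color, label in runs:
    hand = np.array([arm.mgdEndEffector(s[2:]) for s in states])
    plt.plot(hand[:, 0], hand[:, 1], color=color, label=label)
plt.legend()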
Example #3
    def saveOneTraj(self, x, y):
        #computes the articular position q1, q2 from the initial coordinates (x, y)
        q1, q2 = self.arm.mgi(x, y)
        #creates the state vector [dotq1, dotq2, q1, q2]
        q = createVector(q1, q2)
        state = np.array([0., 0., q1, q2])
        #print("start state --------------: ",state)

        #computes the coordinates of the hand (end effector) from the joint position vector
        self.coordHand = self.arm.mgdEndEffector(q)
        #assert(coordHand[0] == x and coordHand[1] == y), "MGD error"  # disabled: fails because of rounding effects

        #initializes parameters for the trajectory
        self.i, self.t, self.cost = 0, 0, 0
        self.stateEstimator.initStore(state)
        self.arm.setState(state)
        self.estimState = state

        self.dataStore = []
        self.vectarget = [0.0, 0.0, self.rs.XTarget, self.rs.YTarget]
        totalCost = 0
        while not self.isFinished():
            action = self.actor.action(self.estimState)
            _, cost = self.actAndStore(action)
            totalCost += cost[0]
        totalCost += self.trajCost.computeFinalReward(self.arm, self.t,
                                                      self.coordHand,
                                                      self.sizeOfTarget)
        filename = findDataFilename(self.saveName + "Log/",
                                    "traj" + str(x) + "-" + str(y), ".log")
        np.savetxt(filename, self.dataStore)

        #used to ignore dispersion when the target line is not crossed
        if self.coordHand[1] >= self.rs.YTarget:
            self.lastCoord.append(self.coordHand[0])

        return totalCost, self.t
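
computeStateTransitionCost and computeFinalReward are not shown in these examples; from the way they are called, the per-step term depends on the (noisy) muscular activation vector and the terminal term on the duration, the hand position and the target size. A purely illustrative sketch of such a cost structure, with hypothetical gamma and bonus parameters, not the project's actual formulas:

import numpy as np

def state_transition_cost(U, gamma=0.001):
    # hypothetical per-step cost: penalize the squared muscular activations
    return -gamma * float(np.sum(np.asarray(U) ** 2))

def final_reward(t, coord_hand, x_target, y_target, target_size, bonus=10.0):
    # hypothetical terminal term: reward crossing the target line inside the target,
    # discounted by the movement duration t
    hit = (coord_hand[1] >= y_target and
           abs(coord_hand[0] - x_target) <= target_size / 2.0)
    return (bonus - t) if hit else 0.0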
Example #4
    def runTrajectories(self, theta, fonction):
        '''
        Generates all the trajectories of the experimental setup and returns the mean cost. This function is used by CMA-ES to optimize the controller.

        Inputs:        -theta: vector of parameters, one-dimensional normalized numpy array
                       -fonction: function that runs the trajectories and returns (meanCost, meanTime)

        Output:        -fitness: normalized CMA-ES fitness computed from the mean cost of all trajectories, float
        '''
        
        #c = Chrono()
        self.initTheta(theta)
        #print "theta avant appel :", theta
        #compute all the trajectories x times each, x = numberOfRepeat
        meanCost, meanTime = fonction(self.numberOfRepeat)
        #cma.plot()
        #opt = cma.CMAOptions()
        #print "CMAES options :", opt
        #c.stop()

        #print("Indiv #: ", self.call, "\n Cost: ", meanCost)
        
        # note: meanCost behaves as a reward here, so higher values are better
        if self.call == 0:
            self.localBestCost = meanCost
            self.localWorstCost = meanCost
            self.localBestTime = meanTime
            self.localWorstTime = meanTime
            self.periodMeanCost = 0.0
            self.periodMeanTime = 0.0
        else:
            if meanCost > self.localBestCost:
                self.localBestCost = meanCost
            elif meanCost < self.localWorstCost:
                self.localWorstCost = meanCost

            if meanTime > self.localBestTime:
                self.localBestTime = meanTime
            elif meanTime < self.localWorstTime:
                self.localWorstTime = meanTime

        if meanCost > self.bestCost:
            self.bestCost = meanCost
            if meanCost > 0:
                extension = ".save" + str(meanCost)
                filename = findDataFilename(self.foldername + "Theta/", "theta", extension)
                np.savetxt(filename, self.theta)
                filename2 = self.foldername + "Best.theta"
                np.savetxt(filename2, self.theta)
        
        self.periodMeanCost += meanCost
        self.periodMeanTime += meanTime

        self.call += 1
        self.call = self.call % self.period

        if self.call == 0:
            self.periodMeanCost = self.periodMeanCost / self.period
            self.periodMeanTime = self.periodMeanTime / self.period
            self.CMAESCostStore.append((self.localWorstCost, self.periodMeanCost, self.localBestCost))
            self.CMAESTimeStore.append((self.localWorstTime, self.periodMeanTime, self.localBestTime))
            costfoldername = self.foldername + "Cost/"
            checkIfFolderExists(costfoldername)
            cost = open(costfoldername + "cmaesCost.log", "a")
            time = open(costfoldername + "cmaesTime.log", "a")
            cost.write(str(self.localWorstCost) + " " + str(self.periodMeanCost) + " " + str(self.localBestCost) + "\n")
            time.write(str(self.localWorstTime) + " " + str(self.periodMeanTime) + " " + str(self.localBestTime) + "\n")
            cost.close()
            time.close()
            #np.savetxt(costfoldername+"cmaesCost.log",self.CMAESCostStore) #Note: inefficient, should rather add to the file
            #np.savetxt(costfoldername+"cmaesTime.log",self.CMAESTimeStore) #Note: inefficient, should rather add to the file

        return 10.0 * (self.rs.rhoCF - meanCost) / self.rs.rhoCF
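
The value returned on the last line turns the mean score into a fitness that CMA-ES minimizes: meanCost behaves as a reward (higher is better) and rs.rhoCF acts as a reference scale, so better scores yield smaller fitness values. A tiny numeric illustration with a hypothetical rhoCF of 20.0:

def cmaes_fitness(mean_cost, rho_cf):
    # mean_cost == rho_cf -> 0; lower (worse) scores -> larger fitness
    return 10.0 * (rho_cf - mean_cost) / rho_cf

print(cmaes_fitness(0.0, 20.0))   # 10.0
print(cmaes_fitness(10.0, 20.0))  # 5.0
print(cmaes_fitness(20.0, 20.0))  # 0.0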
Example #5
    def runTrajectory(self, x, y, foldername):
        '''
        Generates a trajectory from the initial position (x, y)

        Inputs:     -x: abscissa of the initial position, float
                    -y: ordinate of the initial position, float
                    -foldername: folder where the trajectory log is written, string

        Outputs:    -cost: the cost of the trajectory, float
                    -t: the duration of the trajectory, float
                    -lastX: abscissa of the hand if the target line is crossed, -1000 otherwise, float
        '''
        self.costU12=0
        #computes the articular position q1, q2 from the initial coordinates (x, y)
        q1, q2 = self.arm.mgi(x, y)
        #creates the state vector [dotq1, dotq2, q1, q2]
        q = createVector(q1, q2)
        state = np.array([0., 0., q1, q2])
        #print("start state --------------: ",state)

        #computes the coordinates of the hand and the elbow from the position vector
        coordElbow, coordHand = self.arm.mgdFull(q)
        #assert(coordHand[0]==x and coordHand[1]==y), "Erreur de MGD" does not work because of rounding effects

        #initializes parameters for the trajectory
        i, t, cost = 0, 0, 0
        self.stateEstimator.initStore(state)
        self.arm.setState(state)
        estimState = state
        dataStore = []
        qtarget1, qtarget2 = self.arm.mgi(self.rs.XTarget, self.rs.YTarget)
        vectarget = [0.0, 0.0, qtarget1, qtarget2]

        #loop to generate next position until the target is reached 
        while coordHand[1] < self.rs.YTarget and i < self.rs.maxSteps:
            stepStore = []
            #computation of the next muscular activation vector using the controller theta
            #print ("state :",self.arm.getState())
            U = self.controller.computeOutput(estimState)

            if self.rs.det:
                Unoisy = muscleFilter(U)
            else:
                #Unoisy = getNoisyCommand(U,self.arm.getMusclesParameters().getKnoiseU())
                Unoisy = getNoisyCommand(U, self.arm.musclesP.knoiseU)
                Unoisy = muscleFilter(Unoisy)
            #computation of the arm state
            realNextState = self.arm.computeNextState(Unoisy, self.arm.getState())
 
            #computation of the approximated state
            tmpState = self.arm.getState()

            if self.rs.det:
                estimNextState = realNextState
            else:
                U = muscleFilter(U)
                estimNextState = self.stateEstimator.getEstimState(tmpState,U)
            
            #print estimNextState

            self.arm.setState(realNextState)

            #computation of the cost
            cost += self.trajCost.computeStateTransitionCost(Unoisy)
            self.costU12 += self.trajCost.computeStateTransitionCostU12(Unoisy)

            '''
            print "U =", U
            print "Unoisy =", Unoisy
            print "estimstate =", estimState
            #print "theta =", self.controller.theta
            if math.isnan(cost):
                print "NAN : U =", U
                print "NAN : Unoisy =", Unoisy
                print "NAN : estimstate =", estimState
                #print "NAN : theta =", self.controller.theta
                sys.exit()
            '''

            #get dotq and q from the state vector
            dotq, q = self.arm.getDotQAndQFromStateVector(tmpState)
            coordElbow, coordHand = self.arm.mgdFull(q)
            #print ("dotq :",dotq)
            #computation of the coordinates to check whether the target is reached
            #code to save data of the trajectory

            #Note : these structures might be much improved
            if self.saveTraj:
                stepStore.append(vectarget)
                stepStore.append(estimState)
                stepStore.append(tmpState)
                stepStore.append(Unoisy)
                stepStore.append(np.array(U))
                stepStore.append(estimNextState)
                stepStore.append(realNextState)
                stepStore.append([coordElbow[0], coordElbow[1]])
                stepStore.append([coordHand[0], coordHand[1]])
                #print ("before",stepStore)
                tmpstore = np.array(stepStore).flatten()
                row = [item for sub in tmpstore for item in sub]
                #print ("store",row)
                dataStore.append(row)

            estimState = estimNextState
            i += 1
            t += self.rs.dt

        cost += self.trajCost.computeFinalReward(self.arm, t, coordHand, self.sizeOfTarget)

        if self.saveTraj:
            filename = findDataFilename(foldername + "Log/", "traj" + str(x) + "-" + str(y) + ".", ".log")
            np.savetxt(filename, dataStore)
            '''
            if coordHand[0] >= -self.sizeOfTarget/2 and coordHand[0] <= self.sizeOfTarget/2 and coordHand[1] >= self.rs.YTarget and i<230:
                foldername = pathDataFolder + "TrajRepository/"
                name = findFilename(foldername,"Traj",".traj")
                np.savetxt(name,dataStore)
            '''

        lastX = -1000 #used to ignore dispersion when the target line is not crossed
        if coordHand[1] >= self.rs.YTarget:
            lastX = coordHand[0]
        #print "end of trajectory"
        return cost, t, lastX
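
getNoisyCommand is not shown in these examples. A model consistent with how it is called (U perturbed by noise scaled with knoiseU) is multiplicative, signal-dependent Gaussian noise on the activation vector, clipped to the valid [0, 1] range; this is a plausible sketch, not the project's actual implementation:

import numpy as np

def get_noisy_command(U, knoise_u, rng=None):
    # signal-dependent noise: the perturbation's standard deviation grows with the activation
    rng = rng or np.random.default_rng()
    U = np.asarray(U, dtype=float)
    noisy = U + rng.normal(0.0, knoise_u * np.abs(U) + 1e-12)
    return np.clip(noisy, 0.0, 1.0)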
Example #6
    def runTrajectory(self, x, y, foldername):
        '''
        Generates a trajectory from the initial position (x, y)

        Inputs:     -x: abscissa of the initial position, float
                    -y: ordinate of the initial position, float
                    -foldername: folder where the trajectory log is written, string

        Outputs:    -cost: the cost of the trajectory, float
                    -t: the duration of the trajectory, float
                    -lastX: abscissa of the hand if the target line is crossed, -1000 otherwise, float
        '''
        self.costU12 = 0
        #computes the articular position q1, q2 from the initial coordinates (x, y)
        q1, q2 = self.arm.mgi(x, y)
        #creates the state vector [dotq1, dotq2, q1, q2]
        q = createVector(q1, q2)
        state = np.array([0., 0., q1, q2])
        #print("start state --------------: ",state)

        #computes the coordinates of the hand and the elbow from the position vector
        coordElbow, coordHand = self.arm.mgdFull(q)
        #assert(coordHand[0]==x and coordHand[1]==y), "Erreur de MGD" does not work because of rounding effects

        #initializes parameters for the trajectory
        i, t, cost = 0, 0, 0
        self.stateEstimator.initStore(state)
        self.arm.setState(state)
        estimState = state
        dataStore = []
        qtarget1, qtarget2 = self.arm.mgi(self.rs.XTarget, self.rs.YTarget)
        vectarget = [0.0, 0.0, qtarget1, qtarget2]

        #loop to generate next position until the target is reached
        while coordHand[1] < self.rs.YTarget and i < self.rs.maxSteps:
            stepStore = []
            #computation of the next muscular activation vector using the controller theta
            #print ("state :",self.arm.getState())
            U = self.controller.computeOutput(estimState)

            if self.rs.det:
                Unoisy = muscleFilter(U)
            else:
                #Unoisy = getNoisyCommand(U,self.arm.getMusclesParameters().getKnoiseU())
                Unoisy = getNoisyCommand(U, self.arm.musclesP.knoiseU)
                Unoisy = muscleFilter(Unoisy)
            #computation of the arm state
            realNextState = self.arm.computeNextState(Unoisy,
                                                      self.arm.getState())

            #computation of the approximated state
            tmpState = self.arm.getState()

            if self.rs.det:
                estimNextState = realNextState
            else:
                U = muscleFilter(U)
                estimNextState = self.stateEstimator.getEstimState(tmpState, U)

            #print estimNextState

            self.arm.setState(realNextState)

            #computation of the cost
            cost += self.trajCost.computeStateTransitionCost(Unoisy)
            self.costU12 += self.trajCost.computeStateTransitionCostU12(Unoisy)
            '''
            print "U =", U
            print "Unoisy =", Unoisy
            print "estimstate =", estimState
            #print "theta =", self.controller.theta
            if math.isnan(cost):
                print "NAN : U =", U
                print "NAN : Unoisy =", Unoisy
                print "NAN : estimstate =", estimState
                #print "NAN : theta =", self.controller.theta
                sys.exit()
            '''

            #get dotq and q from the state vector
            dotq, q = self.arm.getDotQAndQFromStateVector(tmpState)
            coordElbow, coordHand = self.arm.mgdFull(q)
            #print ("dotq :",dotq)
            #computation of the coordinates to check whether the target is reached
            #code to save data of the trajectory

            #Note : these structures might be much improved
            if self.saveTraj:
                stepStore.append(vectarget)
                stepStore.append(estimState)
                stepStore.append(tmpState)
                stepStore.append(Unoisy)
                stepStore.append(np.array(U))
                stepStore.append(estimNextState)
                stepStore.append(realNextState)
                stepStore.append([coordElbow[0], coordElbow[1]])
                stepStore.append([coordHand[0], coordHand[1]])
                #print ("before",stepStore)
                tmpstore = np.array(stepStore).flatten()
                row = [item for sub in tmpstore for item in sub]
                #print ("store",row)
                dataStore.append(row)

            estimState = estimNextState
            i += 1
            t += self.rs.dt

        cost += self.trajCost.computeFinalReward(self.arm, t, coordHand,
                                                 self.sizeOfTarget)

        if self.saveTraj:
            filename = findDataFilename(foldername + "Log/",
                                        "traj" + str(x) + "-" + str(y) + ".",
                                        ".log")
            np.savetxt(filename, dataStore)
            '''
            if coordHand[0] >= -self.sizeOfTarget/2 and coordHand[0] <= self.sizeOfTarget/2 and coordHand[1] >= self.rs.YTarget and i<230:
                foldername = pathDataFolder + "TrajRepository/"
                name = findFilename(foldername,"Traj",".traj")
                np.savetxt(name,dataStore)
            '''

        lastX = -1000  #used to ignore dispersion when the target line is not crossed
        if coordHand[1] >= self.rs.YTarget:
            lastX = coordHand[0]
        #print "end of trajectory"
        return cost, t, lastX
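
The per-step logging above flattens a ragged list of vectors by going through np.array(...).flatten() followed by a nested comprehension; this relies on NumPy building an object array from ragged input, which recent NumPy versions reject unless dtype=object is passed explicitly. The same row can be built directly with itertools, as a small sketch:

from itertools import chain

def flatten_step(step_store):
    # step_store is a list of 1-D sequences (target, states, commands, coordinates);
    # chain them into one flat row, ready for dataStore.append(row)
    return [float(item) for item in chain.from_iterable(step_store)]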