Example #1
    def graphplan(self):

        #initialization
        initState = self.initialState
        goalState = self.goal
        actions = self.actions
        level = 0

        #create first layer of the graph, note it only has a proposition layer which consists of the initial state.
        propLayerInit = PropositionLayer()
        for prop in initState:
            propLayerInit.addProposition(prop)
        pgInit = RelaxedPlanGraph(0, actions)
        pgInit.setPropositionLayer(propLayerInit)
        self.graph.append(pgInit)
        '''keep expanding the relaxed graph while the proposition layer does not yet contain all of the goal propositions (the relaxed graph has no mutexes, so there is no mutex or fixed-point test here)'''
        while self.goalStateNotInPropLayer(goalState, self.graph[level].getPropositionLayer().getPropositions()):
            level = level + 1
            pgNext = RelaxedPlanGraph(level, self.actions)  # create a new RelaxedPlanGraph object for this level
            pgNext.expand(self.graph[level - 1], self.propositions, self.actions)  # calls the expand function, which you are implementing in the PlanGraph class
            self.graph.append(deepcopy(pgNext))  # append the new level to the plan graph

        return level
Example #2
 def expand(self, previousLevel, allProps, allActions): # you can change the params the function takes if you like
     previousPropositionLayer = previousLevel.getPropositionLayer()
     newActionLayer = ActionLayer()
     
     for action in allActions:
         if previousPropositionLayer.allPrecondsInLayer(action):
             newActionLayer.addAction(action)
     self.actionLayer = newActionLayer
     
     newPropositionLayer = PropositionLayer()
     for prop in allProps:
         if newActionLayer.effectExists(prop):
             newPropositionLayer.addProposition(prop)
     # set new proposition layer
     self.setPropositionLayer(newPropositionLayer)
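
The expand examples on this page lean on a small layer API (addProposition, addAction, allPrecondsInLayer, effectExists, getMutexProps, and so on) that lives elsewhere in these projects. Below is a minimal sketch of what that interface might look like, inferred only from the calls the examples make: the class and method names come from the examples, but the bodies, and the use of plain tuples instead of the projects' Pair helper, are assumptions.

    class PropositionLayer(object):
        """Minimal sketch of the proposition-layer interface the examples assume."""

        def __init__(self):
            self.propositions = []        # propositions that hold at this level
            self.mutexPropositions = []   # pairs of mutually exclusive propositions

        def addProposition(self, prop):
            if prop not in self.propositions:
                self.propositions.append(prop)

        def getPropositions(self):
            return self.propositions

        def addMutexProp(self, prop1, prop2):
            self.mutexPropositions.append((prop1, prop2))  # the projects appear to use a Pair class here

        def getMutexProps(self):
            return self.mutexPropositions

        def allPrecondsInLayer(self, action):
            # an action is applicable if every precondition is present in this layer;
            # assumes Action objects expose getPre(), as in the examples
            return all(p in self.propositions for p in action.getPre())


    class ActionLayer(object):
        """Minimal sketch of the action-layer interface the examples assume."""

        def __init__(self):
            self.actions = []
            self.mutexActions = []

        def addAction(self, action):
            if action not in self.actions:
                self.actions.append(action)

        def getActions(self):
            return self.actions

        def addMutexActions(self, a1, a2):
            self.mutexActions.append((a1, a2))

        def getMutexActions(self):
            return self.mutexActions

        def effectExists(self, prop):
            # a proposition reaches the next level if some action in this layer adds it;
            # assumes Action objects expose isPosEffect(), as in the examples
            return any(action.isPosEffect(prop) for action in self.actions)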
Example #3
File: GraphPlan.py  Project: saagar/cs182
    def graphplan(self):

        #initialization
        initState = self.initialState
        goalState = self.goal
        level = 0
        self.noGoods = [] #make sure you update noGoods in your backward search!
        self.noGoods.append([])

        #create first layer of the graph, note it only has a proposition layer which consists of the initial state.
        propLayerInit = PropositionLayer()
        for prop in initState:
            propLayerInit.addProposition(prop)
        pgInit = PlanGraph(0, self.independentActions)
        pgInit.setPropositionLayer(propLayerInit)
        self.graph.append(pgInit)

        '''while the layer does not contain all of the propositions in the goal state, or some of these propositions are mutex in the layer, and we have not reached the fixed point, continue expanding the graph'''
        while ((self.goalStateNotInPropLayer(goalState, self.graph[level].getPropositionLayer().getPropositions())
                or self.goalStateHasMutex(goalState, self.graph[level].getPropositionLayer()))
               and not self.Fixed(level)):
            self.noGoods.append([])
            level = level + 1
            pgNext = PlanGraph(level, self.independentActions)  # create new PlanGraph object
            pgNext.expand(self.graph[level-1], self.propositions, self.actions)  # calls the expand function, which you are implementing in the PlanGraph class
            self.graph.append(copy.deepcopy(pgNext))  # appending the new level to the plan graph

        if (self.goalStateNotInPropLayer(goalState, self.graph[level].getPropositionLayer().getPropositions())
                or self.goalStateHasMutex(goalState, self.graph[level].getPropositionLayer())):
            print('could not find a plan')
            return None  # the while loop above stopped because the graph reached a fixed point; nothing more to do, we failed

        sizeNoGood = len(self.noGoods[level]) #remember size of nogood table

        plan = self.extract(self.graph, goalState, level) #try to extract a plan since all of the goal propositions are in current graph level, and are not mutex
        while plan is None:  # while we didn't extract a plan successfully
            level = level + 1
            self.noGoods.append([])
            pgNext = PlanGraph(level, self.independentActions)  # create the next level of the graph
            pgNext.expand(self.graph[level-1], self.propositions, self.actions)  # expand it from the previous level
            self.graph.append(copy.deepcopy(pgNext))
            plan = self.extract(self.graph, goalState, level)  # try to extract a plan again
            if plan is None and self.Fixed(level):  # if extraction failed and we reached a fixed point
                if sizeNoGood == len(self.noGoods[level]):  # if the nogood table didn't grow, there is nothing more to do; we failed
                    print('could not find a plan')
                    return None
                sizeNoGood = len(self.noGoods[level])  # we didn't fail yet! update the nogood table size
        print("final plan")
        for act in plan:
            print(act)
        return plan
Example #4
 def graphplan(self):
     
     #initialization
     initState = self.initialState
     goalState = self.goal
     level = 0
     self.noGoods = [] #make sure you update noGoods in your backward search!
     self.noGoods.append([])
     
     #create first layer of the graph, note it only has a proposition layer which consists of the initial state.
     propLayerInit = PropositionLayer()
     for prop in initState:
         propLayerInit.addProposition(prop)
     pgInit = PlanGraph(0, self.independentActions)
     pgInit.setPropositionLayer(propLayerInit)
     self.graph.append(pgInit)
     
     '''while the layer does not contain all of the propositions in the goal state, or some of these propositions are mutex in the layer, and we have not reached the fixed point, continue expanding the graph'''
     while ((self.goalStateNotInPropLayer(goalState, self.graph[level].getPropositionLayer().getPropositions())
             or self.goalStateHasMutex(goalState, self.graph[level].getPropositionLayer()))
            and not self.Fixed(level)):
         self.noGoods.append([])
         level = level + 1
         pgNext = PlanGraph(level, self.independentActions)  # create new PlanGraph object
         pgNext.expand(self.graph[level-1], self.propositions, self.actions)  # calls the expand function, which you are implementing in the PlanGraph class
         self.graph.append(copy.deepcopy(pgNext))  # appending the new level to the plan graph
         
     if (self.goalStateNotInPropLayer(goalState, self.graph[level].getPropositionLayer().getPropositions())
             or self.goalStateHasMutex(goalState, self.graph[level].getPropositionLayer())):
         print('could not find a plan')
         return None  # the while loop above stopped because the graph reached a fixed point; nothing more to do, we failed
     
     sizeNoGood = len(self.noGoods[level]) #remember size of nogood table
     
     plan = self.extract(self.graph, goalState, level) #try to extract a plan since all of the goal propositions are in current graph level, and are not mutex
     while plan is None:  # while we didn't extract a plan successfully
         level = level + 1
         self.noGoods.append([])
         pgNext = PlanGraph(level, self.independentActions)  # create the next level of the graph
         pgNext.expand(self.graph[level-1], self.propositions, self.actions)  # expand it from the previous level
         self.graph.append(copy.deepcopy(pgNext))
         plan = self.extract(self.graph, goalState, level)  # try to extract a plan again
         if plan is None and self.Fixed(level):  # if extraction failed and we reached a fixed point
             if sizeNoGood == len(self.noGoods[level]):  # if the nogood table didn't grow, there is nothing more to do; we failed
                 print('could not find a plan')
                 return None
             sizeNoGood = len(self.noGoods[level])  # we didn't fail yet! update the nogood table size
     print("final plan")
     for act in plan:
         print(act)
     return plan
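
Examples #3 and #4 call self.extract(...) and keep a per-level noGoods table ("make sure you update noGoods in your backward search!"), but the backward search itself is not shown. The sketch below is a simplified, self-contained illustration of how a GraphPlan-style extraction with a nogood table can work; it is not the course code, and the data layout (each level as a plain dict, no_goods as a list of sets) is an assumption made only for this sketch.

    def extract_plan(graph, goals, level, no_goods):
        """Illustrative backward extraction with nogood memoization (assumed representation:
        graph[k]['props'] is a set, graph[k]['actions'] maps action -> (preconds, add_effects),
        graph[k]['mutex_actions'] is a set of frozensets, no_goods is a list of sets)."""
        if level == 0:
            return [] if set(goals) <= graph[0]['props'] else None
        if frozenset(goals) in no_goods[level]:
            return None  # this goal set already failed at this level
        plan = gp_search(graph, set(goals), [], level, no_goods)
        if plan is None:
            no_goods[level].add(frozenset(goals))  # record the failure; this is the nogood update
        return plan


    def gp_search(graph, goals, chosen, level, no_goods):
        if not goals:
            # every goal is covered: recurse on the chosen actions' preconditions one level down
            subgoals = set().union(*(graph[level]['actions'][a][0] for a in chosen)) if chosen else set()
            subplan = extract_plan(graph, subgoals, level - 1, no_goods)
            return None if subplan is None else subplan + list(chosen)
        goal = next(iter(goals))
        for action, (pre, adds) in graph[level]['actions'].items():
            # pick an achiever of the current goal that is not mutex with anything already chosen
            if goal in adds and all(frozenset({action, c}) not in graph[level]['mutex_actions']
                                    for c in chosen):
                plan = gp_search(graph, goals - set(adds), chosen + [action], level, no_goods)
                if plan is not None:
                    return plan
        return None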
Example #5
 def __init__(self, level):
     '''
     Constructor
     '''
     self.level = level
     self.actionLayer = ActionLayer()
     self.propositionLayer = PropositionLayer()
Example #6
    def expand(self, previousLevel, allProps, allActions):  # you can change the params the function takes if you like
        previousPropositionLayer = previousLevel.getPropositionLayer()
        newActionLayer = ActionLayer()

        for action in allActions:
            if previousPropositionLayer.allPrecondsInLayer(action):
                newActionLayer.addAction(action)
        self.actionLayer = newActionLayer

        newPropositionLayer = PropositionLayer()
        for prop in allProps:
            if newActionLayer.effectExists(prop):
                newPropositionLayer.addProposition(prop)
        # set new proposition layer
        self.setPropositionLayer(newPropositionLayer)
Example #7
 def __init__(self, level, independentActions):
     '''
     Constructor
     '''
     self.level = level
     self.independentActions = independentActions  # a list of the independent actions (this would be the same at each level)
     self.actionLayer = ActionLayer()
     self.propositionLayer = PropositionLayer()
Example #8
    def expand(self, previousLevel, allProps, allActions):  # you can change the params the function takes if you like
        Pk = PropositionLayer()
        Ak = ActionLayer()

        for action in allActions:
            pre = action.getPre()
            if all(item1 in previousLevel.getPropositionLayer().getPropositions() for item1 in pre):
                if not self.are_all_Mutex(pre, previousLevel.getPropositionLayer()):
                    Ak.addAction(action)

        for (action1, action2) in combinations(Ak.getActions(), 2):
            if action1 != action2:
                if previousLevel.mutexActions(
                        action1, action2,
                        previousLevel.getPropositionLayer().getMutexProps()):
                    Ak.addMutexActions(action1, action2)

        for prop in allProps:
            for action in Ak.getActions():
                if action.isPosEffect(prop):
                    Pk.addProposition(prop)
                    break  # add each proposition once, even if several actions achieve it

        for (prop, prop2) in combinations(Pk.getPropositions(), 2):
            if (prop != prop2) and (self.mutexPropositions(
                    prop, prop2, Ak.getMutexActions())):
                Pk.addMutexProp(prop, prop2)

        self.setPropositionLayer(Pk)
        self.setActionLayer(Ak)
Example #9
 def graphplan(self):
     
     initState = self.initialState
     goalState = self.goal
     level = 0
     
     #create first layer of the graph, note it only has a proposition layer which consists of the initial state.
     propLayerInit = PropositionLayer()
     for prop in initState:
         propLayerInit.addProposition(prop)
     pgInit = RelaxedPlanGraph(0)
     pgInit.setPropositionLayer(propLayerInit)
     self.graph.append(pgInit)
     
     '''keep expanding the relaxed graph while the proposition layer does not yet contain all of the goal propositions (the relaxed graph has no mutexes, so there is no mutex or fixed-point test here)'''
     while self.goalStateNotInPropLayer(goalState, self.graph[level].getPropositionLayer().getPropositions()):
         level = level + 1
         pgNext = RelaxedPlanGraph(level)  # create a new RelaxedPlanGraph object for this level
         pgNext.expand(self.graph[level-1], self.propositions, self.actions)  # calls the expand function, which you are implementing in the PlanGraph class
         self.graph.append(copy.deepcopy(pgNext))  # append the new level to the plan graph
     
     return level
Example #10
    def expand(self, previousLevel, allProps, allActions):  # you can change the params the function takes if you like
        previousPropositionLayer = previousLevel.getPropositionLayer()
        newActionLayer = ActionLayer()

        for action in allActions:
            if previousPropositionLayer.allPrecondsInLayer(action):
                newActionLayer.addAction(action)
        # add mutex actions
        for action1 in newActionLayer.getActions():
            for action2 in newActionLayer.getActions():
                actionPair = Pair(action1, action2)
                if action1 != action2 and self.mutexActions(
                        action1, action2,
                        previousPropositionLayer.getMutexProps()
                ) and actionPair not in newActionLayer.getMutexActions():
                    newActionLayer.addMutexActions(action1, action2)

        self.actionLayer = newActionLayer

        newPropositionLayer = PropositionLayer()
        for prop in allProps:
            if newActionLayer.effectExists(prop):
                newPropositionLayer.addProposition(prop)

        # add mutex propositions
        for prop1 in newPropositionLayer.getPropositions():
            for prop2 in newPropositionLayer.getPropositions():
                propPair = Pair(prop1, prop2)
                if prop1 != prop2 and self.mutexPropositions(
                        prop1, prop2, newActionLayer.getMutexActions()
                ) and propPair not in newPropositionLayer.getMutexProps():
                    newPropositionLayer.addMutexProp(prop1, prop2)

        # set new proposition layer
        self.setPropositionLayer(newPropositionLayer)
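
Examples #8 and #10 above (and #11 below) also depend on mutexActions and mutexPropositions helpers that are not shown. As a rough illustration of the standard GraphPlan mutex tests those helpers presumably implement (interference and competing needs for actions, inconsistent support for propositions), here is a self-contained sketch using plain tuples and a throwaway Action namedtuple; none of this is the projects' actual API.

    from collections import namedtuple

    # throwaway action representation for this sketch only (the projects use their own
    # Action/Pair classes with getPre(), isPosEffect(), etc.)
    Action = namedtuple('Action', ['name', 'pre', 'add', 'delete'])


    def mutex_actions(a1, a2, mutex_props):
        """Two actions are mutex if one deletes a precondition or positive effect of the
        other (interference / inconsistent effects), or some pair of their preconditions
        was mutex in the previous proposition layer (competing needs)."""
        for p in a1.delete:
            if p in a2.pre or p in a2.add:
                return True
        for p in a2.delete:
            if p in a1.pre or p in a1.add:
                return True
        for p1 in a1.pre:
            for p2 in a2.pre:
                if (p1, p2) in mutex_props or (p2, p1) in mutex_props:
                    return True
        return False


    def mutex_propositions(p1, p2, producers, mutex_acts):
        """Two propositions are mutex (inconsistent support) if no single action achieves
        both and every pair of their achievers is mutex; producers maps prop -> achievers."""
        for a1 in producers[p1]:
            for a2 in producers[p2]:
                if a1 is a2:
                    return False  # one action achieves both, so they can coexist
                if (a1, a2) not in mutex_acts and (a2, a1) not in mutex_acts:
                    return False
        return True


    # tiny usage example: b deletes a's positive effect, so the two actions are mutex
    a = Action('load',   pre={'atA'},    add={'loaded'}, delete=set())
    b = Action('unload', pre={'loaded'}, add=set(),      delete={'loaded'})
    print(mutex_actions(a, b, mutex_props=set()))  # True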
Example #11
File: PlanGraph.py  Project: saagar/cs182
    def expand(self, previousLevel, allProps, allActions): #you can change the params the function takes if you like
        '''YOUR CODE HERE'''
        # gets things first so we don't get them over and over in our list comprehensions
        previousPropLayer = previousLevel.getPropositionLayer()
        previousActionLayer = previousLevel.getActionLayer()
        previousActions = previousActionLayer.getActions()

        previousProps = previousPropLayer.getPropositions()
        previousMutexProps = previousPropLayer.getMutexProps()

        A_k = ActionLayer()
        ###### this is for A_k (actions in next layer)
        for a in allActions:
            if (all(p in previousProps for p in a.getPre()) 
                and not any(previousPropLayer.isMutex(p1, p2) for p1, p2 in combinations(a.getPre(), 2))):
                A_k.addAction(a)

        ###### this is for mA_k (mutex actions in next layer)
        currentActions = A_k.getActions()
        for a1, a2 in combinations(currentActions, 2):
            if a1 != a2 and previousLevel.mutexActions(a1, a2, previousMutexProps):
                A_k.addMutexActions(a1, a2)

        self.setActionLayer(A_k)

        ###### this is for Pk (propositions in next layer)
        P_k = PropositionLayer()
        for p in allProps:
            if any(a.isPosEffect(p) for a in currentActions):
                P_k.addProposition(p)
        
        ###### this is for mPk (mutex propositions in next layer)
        A_k = self.getActionLayer()
        for p1, p2 in combinations(P_k.getPropositions(), 2):
            if p1 != p2 and self.mutexPropositions(p1, p2, A_k.getMutexActions()):
                P_k.addMutexProp(p1, p2)
        self.setPropositionLayer(P_k)

        return