Example #1
0
def UserGoalsMidca(domainFile,
                   stateFile,
                   display=print,
                   goalsFile=None,
                   argsPyHopPlanner=None):
    """Build a standard (non-meta) blocksworld MIDCA instance.

    Args:
        domainFile: path to the .sim domain definition to load.
        stateFile: path to the .sim initial-state file applied to the world.
        display: callable used by the PhaseManager to render output.
        goalsFile: unused here; kept for interface compatibility with callers.
        argsPyHopPlanner: optional sequence of positional arguments forwarded
            to planning.PyHopPlanner. Defaults to no arguments.

    Returns:
        A configured base.PhaseManager (which wraps a MIDCA object).
    """
    # Use a None sentinel instead of a mutable [] default: a shared default
    # list would persist across calls if any caller ever mutated it.
    if argsPyHopPlanner is None:
        argsPyHopPlanner = []
    world = domainread.load_domain(domainFile)
    stateread.apply_state_file(world, stateFile)
    # creates a PhaseManager object, which wraps a MIDCA object
    myMidca = base.PhaseManager(world, display=display, verbose=4)
    # add phases by name
    for phase in [
            "Simulate", "Perceive", "Interpret", "Eval", "Intend", "Plan",
            "Act"
    ]:
        myMidca.append_phase(phase)

    # add the modules which instantiate basic blocksworld operation
    myMidca.append_module("Simulate", simulator.MidcaActionSimulator())
    myMidca.append_module("Simulate", simulator.ASCIIWorldViewer())
    myMidca.append_module("Perceive", perceive.PerfectObserver())
    myMidca.append_module("Interpret", note.ADistanceAnomalyNoter())
    #myMidca.append_module("Interpret", guide.UserGoalInput())
    myMidca.append_module("Eval", evaluate.SimpleEval())
    myMidca.append_module("Intend", intend.SimpleIntend())
    myMidca.append_module("Plan", planning.PyHopPlanner(*argsPyHopPlanner))
    myMidca.append_module("Act", act.SimpleAct())
    return myMidca
def UserGoalsMidca(domainFile, stateFile, goalsFile = None, extinguish = False):
    """Construct a meta-enabled blocksworld MIDCA instance.

    Loads the domain and initial state, wires up the cognitive-layer phases
    and modules (including the deliberately broken PyHop planner variant used
    in meta-reasoning demos), then wires up the meta layer, and returns the
    resulting PhaseManager.
    """
    world = domainread.load_domain(domainFile)
    stateread.apply_state_file(world, stateFile)
    myMidca = base.PhaseManager(world, verbose=1, display = asqiiDisplay, metaEnabled=True)

    # cognitive layer phases, in execution order
    for phase_name in ("Simulate", "Perceive", "Interpret", "Eval", "Intend", "Plan", "Act"):
        myMidca.append_phase(phase_name)

    # cognitive layer modules, registered phase by phase
    cognitive_modules = (
        ("Simulate", simulator.MidcaActionSimulator()),
        ("Simulate", simulator.ASCIIWorldViewer()),
        ("Perceive", perceive.PerfectObserver()),
        ("Interpret", note.ADistanceAnomalyNoter()),
        ("Interpret", guide.UserGoalInput()),
        ("Eval", evaluate.SimpleEval()),
        ("Intend", intend.SimpleIntend()),
        ("Plan", planningbroken.PyHopPlannerBroken(extinguish)),
        ("Act", act.SimpleAct()),
    )
    for phase_name, module in cognitive_modules:
        myMidca.append_module(phase_name, module)

    # meta layer phases
    #for phase in ["Monitor", "Interpret", "Eval", "Intend", "Plan", "Control"]:
    for phase_name in ("Monitor", "Interpret", "Intend", "Plan", "Control"):
        myMidca.append_meta_phase(phase_name)

    # meta layer modules
    meta_modules = (
        ("Monitor", monitor.MRSimpleMonitor()),
        ("Interpret", interpret.MRSimpleDetect()),
        ("Interpret", interpret.MRSimpleGoalGen()),
        ("Intend", metaintend.MRSimpleIntend()),
        ("Plan", plan.MRSimplePlanner()),
        ("Control", control.MRSimpleControl()),
    )
    for phase_name, module in meta_modules:
        myMidca.append_meta_module(phase_name, module)

    return myMidca
Example #3
0
 def save_30_buildings(self):
     '''
     Generate 30 random building (tower) goal problems and save them into
     the 30_problem_set_ijcai.pickle file.

     Each entry a[k] is [goal_sets, number_of_buildings, shuffled_index_list]
     so the exact problem can be replayed later.
     '''
     a = {}
     for k in range(0, 30):
         self.curr_goal_sets[:] = []
         select_buildings = 0
         index = []
         objs_names = self.world.get_objects_names_by_type("BLOCK")
         # remove the object table since we need only blocks
         objs_names.remove("table")
         random.shuffle(objs_names)
         # builds the goal sets
         #self.build_current_goal_sets(objs_names)
         self.build_current_goal_sets_same(objs_names)
         atoms = self.world.get_atoms()
         self.remove_bunch_atoms(atoms)
         stateread.apply_state_file(self.world, self.stateFile)
         stateread.apply_state_str(self.world, self.state_str)
         self.mem.add(self.mem.STATES, copy.deepcopy(self.world))
         self.mem.set(self.mem.TIME_CONSTRUCTION, self.Time)
         # empty the selected goals list
         del self.selected_goals[:]
         # generate some random goals through random function on current goal set
         # this random function is for no:of buildings
         select_buildings = random.randint(2, len(self.curr_goal_sets) - 1)
         print(("NO.OF BUILDINGS TO CONSTRUCT: " + str(select_buildings)))
         # random indexes into self.curr_goal_sets, within the range of
         # 0 and no:of buildings
         index = random.sample(list(range(0, len(self.curr_goal_sets))),
                               select_buildings)
         print("THE BUILDINGS ARE: ")
         print("[")
         for i in index:
             # since self.curr_goal_sets is in a structure of list in a list
             # we should iterate through the list completely
             print(("Tower " + self.curr_goal_sets[i][0].args[0]), end=' ')
             if (len(self.curr_goal_sets[i]) == 1):
                 print(("(" + str(len(self.curr_goal_sets[i])) + " Block)"))
             else:
                 print(
                     ("(" + str(len(self.curr_goal_sets[i])) + " Blocks)"))
             for j in self.curr_goal_sets[i]:
                 self.selected_goals.append(j)
         print("]")
         print("")
         random.shuffle(index)
         print(index)
         # BUG FIX: snapshot the goal sets with deepcopy. self.curr_goal_sets
         # is cleared and rebuilt in place every iteration, so storing the
         # live list made every a[k] alias the same (last) problem. The
         # deepcopy of `index` alongside shows the snapshot intent.
         a[k] = [
             copy.deepcopy(self.curr_goal_sets), select_buildings,
             copy.deepcopy(index)
         ]
     # Write once, after all 30 problems are generated; the old code
     # truncated and rewrote the whole file on every iteration.
     with open('30_problem_set_ijcai.pickle', 'wb') as handle:
         pickle.dump(a, handle)
Example #4
0
def guiMidca(domainFile, stateFile, goalsFile = None):
	"""Build a MIDCA instance for the GUI demo and return its PhaseManager."""
	world = domainread.load_domain(domainFile)
	stateread.apply_state_file(world, stateFile)
	myMidca = base.PhaseManager(world, display = asqiiDisplay)
	phases = ("Simulate", "Perceive", "Interpret", "Eval", "Intend", "Plan", "Act")
	for phaseName in phases:
		myMidca.append_phase(phaseName)

	# register the standard blocksworld modules with their phases
	registrations = (
		("Simulate", simulator.MidcaActionSimulator()),
		("Simulate", simulator.ASCIIWorldViewer()),
		("Perceive", perceive.PerfectObserver()),
		("Interpret", note.ADistanceAnomalyNoter()),
		("Eval", evaluate.SimpleEval()),
		("Intend", intend.SimpleIntend()),
		("Plan", planning.PyHopPlanner()),
		("Act", act.SimpleAct()),
	)
	for phaseName, module in registrations:
		myMidca.append_module(phaseName, module)
	return myMidca
Example #5
0
def guiMidca(domainFile, stateFile, goalsFile = None):
	"""Create and wire up a GUI-oriented MIDCA PhaseManager.

	Loads the domain and state files, adds the standard phase pipeline,
	and attaches one module per phase (two for Simulate).
	"""
	world = domainread.load_domain(domainFile)
	stateread.apply_state_file(world, stateFile)
	manager = base.PhaseManager(world, display = asqiiDisplay)
	for phaseName in ["Simulate", "Perceive", "Interpret", "Eval", "Intend", "Plan", "Act"]:
		manager.append_phase(phaseName)

	# two Simulate modules: action simulation first, then the ASCII viewer
	manager.append_module("Simulate", simulator.MidcaActionSimulator())
	manager.append_module("Simulate", simulator.ASCIIWorldViewer())
	manager.append_module("Perceive", perceive.PerfectObserver())
	manager.append_module("Interpret", note.ADistanceAnomalyNoter())
	manager.append_module("Eval", evaluate.SimpleEval())
	manager.append_module("Intend", intend.SimpleIntend())
	manager.append_module("Plan", planning.PyHopPlanner())
	manager.append_module("Act", act.SimpleAct())
	return manager
Example #6
0
def UserGoalsMidca(domainFile, stateFile, display=print, goalsFile = None, argsPyHopPlanner=None):
    """Build a standard blocksworld MIDCA instance wrapped in a PhaseManager.

    Args:
        domainFile: path of the .sim domain definition.
        stateFile: path of the .sim initial state applied to the world.
        display: callable the PhaseManager uses to render output.
        goalsFile: unused; retained for caller compatibility.
        argsPyHopPlanner: optional sequence of positional args forwarded to
            planning.PyHopPlanner; defaults to no arguments.

    Returns:
        The configured base.PhaseManager.
    """
    # None sentinel instead of a mutable [] default, which would be shared
    # across calls if a caller ever mutated it.
    if argsPyHopPlanner is None:
        argsPyHopPlanner = []
    world = domainread.load_domain(domainFile)
    stateread.apply_state_file(world, stateFile)
    # creates a PhaseManager object, which wraps a MIDCA object
    myMidca = base.PhaseManager(world, display = display, verbose=4)
    # add phases by name
    for phase in ["Simulate", "Perceive", "Interpret", "Eval", "Intend", "Plan", "Act"]:
        myMidca.append_phase(phase)

    # add the modules which instantiate basic blocksworld operation
    myMidca.append_module("Simulate", simulator.MidcaActionSimulator())
    myMidca.append_module("Simulate", simulator.ASCIIWorldViewer())
    myMidca.append_module("Perceive", perceive.PerfectObserver())
    myMidca.append_module("Interpret", note.ADistanceAnomalyNoter())
    #myMidca.append_module("Interpret", guide.UserGoalInput())
    myMidca.append_module("Eval", evaluate.SimpleEval())
    myMidca.append_module("Intend", intend.SimpleIntend())
    myMidca.append_module("Plan", planning.PyHopPlanner(*argsPyHopPlanner))
    myMidca.append_module("Act", act.SimpleAct())
    return myMidca
Example #7
0
 def next_goal(self):
     """Reset the world to its default state and generate a fresh random
     goal set.

     Side effects: resets MONEY in MIDCA memory, clears and rebuilds the
     world from the state file/string, snapshots the world into mem.STATES,
     and repopulates self.curr_goal_sets in place.

     Returns:
         self.curr_goal_sets, the newly generated list of goal sets.
     """
     # empty the previous goals and set the money to default in memory
     self.mem.set(self.mem.MONEY, self.money)
     # remove all the atoms in the world
     atoms = self.world.get_atoms()
     self.remove_bunch_atoms(atoms)
     # whenever we get a new goal, first initialize the world to its default state
     stateread.apply_state_file(self.world, self.stateFile)
     stateread.apply_state_str(self.world, self.state_str)
     # snapshot the freshly reset world so later phases can refer back to it
     self.mem.add(self.mem.STATES, copy.deepcopy(self.world))
     # Create the random set of goals with random dishes and persons
     self.curr_goal_sets[:] = []
     self.create_random_goals()
     #self.save_30_goal_sets()
     #self.write_memory_to_file()
     #self.select_random_goals_from_30_sets()
     # print the generated goals
     print("")
     for each_goal in self.curr_goal_sets:
         print(each_goal)
     print("")
     # return the generated goals
     return self.curr_goal_sets
Example #8
0
# Resolve the directory containing this script so relative paths work
# regardless of the current working directory.
thisDir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))

MIDCA_ROOT = thisDir + "/../"

# Arsonist/extinguisher blocksworld scenario files.
domainFile = MIDCA_ROOT + "domains/blocksworld/arsonist_extinguish.sim"
stateFile = MIDCA_ROOT + "domains/blocksworld/states/extinguisher_state.sim"
extinguish = True

# Positional arguments forwarded to PyHopPlanner below.
argsPyHopPlanner = [util.pyhop_state_from_world,
					util.pyhop_tasks_from_goals,
					DECLARE_METHODS_FUNC,
					DECLARE_OPERATORS_FUNC,
					extinguish]

world = domainread.load_domain(domainFile)
stateread.apply_state_file(world, stateFile)
#creates a PhaseManager object, which wraps a MIDCA object
myMidca = base.PhaseManager(world, display = util.asqiiDisplay, verbose=4)
#add phases by name
for phase in ["Simulate", "Perceive", "Interpret", "Eval", "Intend", "Plan", "Act"]:
	myMidca.append_phase(phase)

#add the modules which instantiate basic blocksworld operation
myMidca.append_module("Simulate", simulator.MidcaActionSimulator())
myMidca.append_module("Simulate", simulator.ASCIIWorldViewer())
myMidca.append_module("Perceive", PerfectObserver.PerfectObserver())
myMidca.append_module("Interpret", ADistanceAnomalyNoter.ADistanceAnomalyNoter())
#myMidca.append_module("Interpret", guide.UserGoalInput())
myMidca.append_module("Eval", SimpleEval.SimpleEval())
myMidca.append_module("Intend", SimpleIntend.SimpleIntend())
# NOTE(review): no module is appended for the "Act" phase here — the snippet
# appears truncated; confirm against the original script.
myMidca.append_module("Plan", PyHopPlanner.PyHopPlanner(*argsPyHopPlanner))
Example #9
0
        #    return "incomplete"
        # if there is no state do not send report to meta aqua
        if not self.finalstate:
            return "incomplete"
        else:
            self.namecounts["report"] += 1
            s = "(" + "report." + str(self.namecounts["report"]) + "\n("
            for action in self.actions:
                s += "\t(\n"
                s += self.action_str(action)
                s += "\n\"\")\n("
                s += self.state_str(self.finalstate)
                s += "\n\"\")\n"
            return s + "))"

'''
ma = MAReport()
import domainread, stateread
world = domainread.load_domain("./domain.sim")
stateread.apply_state_file(world, "./defstate.sim")
ma.finalstate = world
ma.actions.append(["unstack", "block1", "block2"])
ma.actions.append(["catchfire", "block1"])
print ma
'''

class MAReporter(base.BaseModule):

    '''
    MIDCA module that sends a report on the world and actions to the
    Meta-AQUA story understanding system. This requires Meta-AQUA to be
Example #10
0
    def next_goal(self, write_to_file=False):
        """Log the previous run's evaluation (optionally to evaluation.txt),
        reset the world, and generate a fresh random goal selection.

        Args:
            write_to_file: when True and both actual/expected construction
                times are present in memory, append a summary line to
                evaluation.txt.

        Returns:
            self.selected_goals, the flattened list of goals for the
            randomly chosen towers.
        """
        # pull bookkeeping for the previous run out of MIDCA memory
        actual_time = self.mem.get(self.mem.ACTUAL_TIME_CONSTRUCTION)
        expected_time = self.mem.get(self.mem.EXPECTED_TIME_CONSTRUCTION)
        selected_buildings = self.mem.get(self.mem.SELECTED_BUILDING_LIST)
        complete_buildings = self.mem.get(self.mem.COMPLETE_BUILDING_LIST)
        executed_buildings = self.mem.get(self.mem.EXECUTED_BUILDING_LIST)
        actual_scores = self.mem.get(self.mem.ACTUAL_SCORES)
        if executed_buildings:
            expected_scores = sum(executed_buildings)
        else:
            expected_scores = 0
        if actual_time and expected_time:
            # points-per-time ratios; times are read from index 0 —
            # presumably these are stored as single-element lists; confirm.
            expected_p_t = float(expected_scores / expected_time[0])
            actual_p_t = float(actual_scores / actual_time[0])

            if write_to_file:
                with open("evaluation.txt", "a") as myfile:

                    myfile.write(
                        "%-5s %-5s %-5s %-5s " %
                        (str(complete_buildings), str(selected_buildings),
                         str(expected_time), str(actual_time)))
                    myfile.write("%-5s %-5s %-5s %-5s %-5s " %
                                 (str([expected_scores]), str([actual_scores]),
                                  str([expected_p_t]), str(
                                      [actual_p_t]), str(executed_buildings)))
                    myfile.write("\n")

            # clear the per-run bookkeeping for the next problem
            self.mem.set(self.mem.ACTUAL_TIME_CONSTRUCTION, None)
            self.mem.set(self.mem.EXPECTED_TIME_CONSTRUCTION, None)
            self.mem.set(self.mem.SELECTED_BUILDING_LIST, None)
            self.mem.set(self.mem.COMPLETE_BUILDING_LIST, None)
            self.mem.set(self.mem.EXECUTED_BUILDING_LIST, None)
            self.mem.set(self.mem.ACTUAL_SCORES, None)

        #print(self.mem.get(self.mem.STATES)[-1])
        #print(self.initial_world)
        # initiate the world
        #predicateworld.asqiiDisplay(world)
        #self.world =  self.initial_world.copy()
        #print("Modified world")
        #print(self.world)
        #base.MIDCA.update_world(self,self.initial_world.copy())
        #self.mem.add(self.mem.STATES, self.initial_world.copy())
        #print(self.mem.get(self.mem.STATES)[-1]
        #atoms = self.initial_world.get_atoms()
        #atoms1 = self.initial_world.get_atoms()
        #for atom in atoms1:
        #       self.world.add_atom(atom)
        # get all the object names of type block
        self.curr_goal_sets[:] = []
        objs_names = self.world.get_objects_names_by_type("BLOCK")
        # remove the object table since we need only blocks
        objs_names.remove("table")
        random.shuffle(objs_names)
        # builds the goal sets
        #self.build_current_goal_sets(objs_names)
        self.build_current_goal_sets_same(objs_names)
        # reset the world: drop all atoms, then re-apply the base state
        atoms = self.world.get_atoms()
        self.remove_bunch_atoms(atoms)
        stateread.apply_state_file(self.world, self.stateFile)
        stateread.apply_state_str(self.world, self.state_str)
        self.mem.add(self.mem.STATES, copy.deepcopy(self.world))
        self.mem.set(self.mem.TIME_CONSTRUCTION, self.Time)
        #print(self.world)
        # empty the selected goals list
        del self.selected_goals[:]
        # generate some random goals through random function on current goal set
        # this random function is for no:of buildings
        select_buildings = random.randint(2, len(self.curr_goal_sets) - 1)
        print(("NO.OF BUILDINGS TO CONSTRUCT: " + str(select_buildings)))
        # this is for the random indexes , that should be taken from the variable self.curr_goal_sets
        # compute the index list with in the range of 0 and no:of buildings
        index = random.sample(list(range(0, len(self.curr_goal_sets))),
                              select_buildings)
        print("THE BUILDINGS ARE: ")
        print("[")
        for i in index:
            # since self.curr_goal_sets is in a structure of list in a list
            # we should iterate through the list completely
            print(("Tower " + self.curr_goal_sets[i][0].args[0]), end=' ')
            if (len(self.curr_goal_sets[i]) == 1):
                print(("(" + str(len(self.curr_goal_sets[i])) + " Block)"))
            else:
                print(("(" + str(len(self.curr_goal_sets[i])) + " Blocks)"))
            for j in self.curr_goal_sets[i]:
                #print(j)
                self.selected_goals.append(j)
        print("]")
        print("")

        return self.selected_goals
Example #11
0
    def next_goal_30_goal_transformations(self, write_to_file=False):
        """Replay problem self.count from 30_problem_set.pickle with every
        "on" goal transformed to "stable-on", and return the selected goals.

        Also appends one row to evaluation.csv: a data row summarizing the
        previous run when EXECUTED_BUILDING_LIST is present in memory,
        otherwise the header row (first call).

        Args:
            write_to_file: unused; retained for interface compatibility.

        Returns:
            self.selected_goals, the flattened list of goals for the chosen
            towers.
        """
        selected_buildings = self.mem.get(self.mem.SELECTED_BUILDING_LIST)
        executed_buildings = self.mem.get(self.mem.EXECUTED_BUILDING_LIST)
        actual_scores = self.mem.get(self.mem.ACTUAL_SCORES)

        # BUG FIX: the old code opened evaluation.csv twice (one bare open()
        # plus a `with open()` on the same path) and never closed the handle
        # the csv writer actually used, leaking it and leaving the row
        # buffered. A single context manager guarantees flush + close.
        with open('evaluation.csv', "a") as myfile:
            writer = csv.writer(myfile,
                                delimiter=',',
                                quotechar='"',
                                quoting=csv.QUOTE_ALL)
            if executed_buildings:
                data = [(self.count), selected_buildings, actual_scores,
                        executed_buildings]
                writer.writerow(data)
                # clear the per-run bookkeeping for the next problem
                self.mem.set(self.mem.SELECTED_BUILDING_LIST, None)
                self.mem.set(self.mem.EXECUTED_BUILDING_LIST, None)
                self.mem.set(self.mem.ACTUAL_SCORES, None)
            else:
                # first call: write the header row
                data = [
                    "S.NO", "PROBLEM SET", "ACTUAL SCORES",
                    "CONSTRUCTED BUILDINGS"
                ]
                writer.writerow(data)

        self.curr_goal_sets[:] = []
        objs_names = self.world.get_objects_names_by_type("BLOCK")
        # remove the object table since we need only blocks
        objs_names.remove("table")
        random.shuffle(objs_names)
        # builds the goal sets
        self.build_current_goal_sets(objs_names)
        # reset the world: drop all atoms, then re-apply the base state
        atoms = self.world.get_atoms()
        self.remove_bunch_atoms(atoms)
        stateread.apply_state_file(self.world, self.stateFile)
        stateread.apply_state_str(self.world, self.state_str)
        self.mem.add(self.mem.STATES, copy.deepcopy(self.world))
        self.mem.set(self.mem.TIME_CONSTRUCTION, self.Time)
        print((self.world))
        # empty the selected goals list
        del self.selected_goals[:]

        # load the pre-generated problem for this run and advance the counter
        with open('30_problem_set.pickle', 'rb') as handle:
            a = pickle.load(handle)
        b = a[self.count]
        self.count = self.count + 1
        select_buildings = b[1]
        print(("NO.OF BUILDINGS TO CONSTRUCT: " + str(select_buildings)))
        # saved random indexes into self.curr_goal_sets for this problem
        index = b[2]
        self.curr_goal_sets = b[0]
        # transform every "on" goal into its "stable-on" counterpart
        for each in self.curr_goal_sets:
            for goal in each:
                if goal["predicate"] == "on":
                    goal["predicate"] = "stable-on"
        print("THE BUILDINGS ARE: ")
        print("[")
        for i in index:
            # since self.curr_goal_sets is in a structure of list in a list
            # we should iterate through the list completely
            print(("Tower " + self.curr_goal_sets[i][0].args[0]), end=' ')
            if (len(self.curr_goal_sets[i]) == 1):
                print(("(" + str(len(self.curr_goal_sets[i])) + " Block)"))
            else:
                print(("(" + str(len(self.curr_goal_sets[i])) + " Blocks)"))
            for j in self.curr_goal_sets[i]:
                self.selected_goals.append(j)
        print("]")
        print("")

        return self.selected_goals
Example #12
0
    def next_goal_30(self):
        '''
        Generate the goals from the 30 sets.

        Appends a summary row for the previous run to evaluation.csv (or the
        header row on the first call), then replays problem self.count from
        30_problem_set_ijcai.pickle and returns the selected goals.
        '''
        # pull the bookkeeping for the previous run out of MIDCA memory
        actual_time = self.mem.get(self.mem.ACTUAL_TIME_CONSTRUCTION)
        expected_time = self.mem.get(self.mem.EXPECTED_TIME_CONSTRUCTION)
        selected_buildings = self.mem.get(self.mem.SELECTED_BUILDING_LIST)
        complete_buildings = self.mem.get(self.mem.COMPLETE_BUILDING_LIST)
        executed_buildings = self.mem.get(self.mem.EXECUTED_BUILDING_LIST)
        actual_scores = self.mem.get(self.mem.ACTUAL_SCORES)
        P = self.mem.get(self.mem.P)
        t = self.mem.get(self.mem.t)
        # stringify P and t and strip the "[...]" marker that repr() emits
        # for self-referential lists — presumably these can be cyclic;
        # confirm against the producer of mem.P / mem.t.
        P = str(P)
        t = str(t)
        P = P.replace("[...]", "")
        t = t.replace("[...]", "")
        if not actual_scores:
            actual_scores = 0

        if executed_buildings:
            expected_scores = sum(selected_buildings)
        else:
            expected_scores = 0
        if complete_buildings:
            if expected_time:
                expected_p_t = float(expected_scores / expected_time)
            else:
                # no expected time recorded: blank the derived columns
                e_t = None
                e_p_t = None
                e_s = None

            if actual_time:
                actual_p_t = float(actual_scores / actual_time)
            else:
                a_t = None
                a_s = None
                a_p_t = None

            # NOTE(review): evaluation.csv is opened twice here (bare open()
            # and the `with` below); the first handle — the one the writer
            # uses — is never closed, so the row is only flushed at
            # interpreter exit. Worth consolidating into one `with`.
            myfile = open('evaluation.csv', "a")
            writer = csv.writer(myfile,
                                delimiter=',',
                                quotechar='"',
                                quoting=csv.QUOTE_ALL)
            with open("evaluation.csv", "a") as myfile:
                # round whichever metrics are present; zero scores are kept
                # as literal 0 rather than None
                if expected_time:
                    e_t = round(expected_time, 2)
                    e_p_t = round(expected_p_t, 2)
                if actual_time:
                    a_t = round(actual_time, 2)
                    a_p_t = round(actual_p_t, 2)
                if expected_scores:
                    e_s = round(expected_scores, 2)
                if expected_scores == 0:
                    e_s = 0
                if actual_scores:
                    a_s = round(actual_scores, 2)

                if actual_scores == 0:
                    a_s = 0
                e_b = executed_buildings
                data = [
                    self.count, complete_buildings, selected_buildings, e_t,
                    a_t, e_s, a_s, e_p_t, a_p_t, e_b,
                    str(P),
                    str(t)
                ]
                writer.writerow(data)
            # clear the per-run bookkeeping for the next problem
            self.mem.set(self.mem.ACTUAL_TIME_CONSTRUCTION, None)
            self.mem.set(self.mem.EXPECTED_TIME_CONSTRUCTION, None)
            self.mem.set(self.mem.SELECTED_BUILDING_LIST, None)
            self.mem.set(self.mem.COMPLETE_BUILDING_LIST, None)
            self.mem.set(self.mem.EXECUTED_BUILDING_LIST, None)
            self.mem.set(self.mem.ACTUAL_SCORES, None)
            self.mem.set(self.mem.P, None)
            self.mem.set(self.mem.t, None)

        else:
            # first call: write the header row
            myfile = open('evaluation.csv', "a")
            writer = csv.writer(myfile,
                                delimiter=',',
                                quotechar='"',
                                quoting=csv.QUOTE_ALL)
            data = [
                "S.NO", "PROBLEM SET", "SELECTED TOWERS", "EXPECTED TIME",
                "ACTUAL TIME", "EXPECTED SCORES", "ACTUAL SCORES",
                "EXPECTED(P/T)", "ACTUAL(P/T) ", "CONSTRUCTED BUILDINGS", "P",
                "t"
            ]
            writer.writerow(data)

        self.curr_goal_sets[:] = []
        objs_names = self.world.get_objects_names_by_type("BLOCK")
        # remove the object table since we need only blocks
        objs_names.remove("table")
        random.shuffle(objs_names)
        # builds the goal sets
        self.build_current_goal_sets(objs_names)
        # reset the world: drop all atoms, then re-apply the base state
        atoms = self.world.get_atoms()
        self.remove_bunch_atoms(atoms)
        stateread.apply_state_file(self.world, self.stateFile)
        stateread.apply_state_str(self.world, self.state_str)
        self.mem.add(self.mem.STATES, copy.deepcopy(self.world))
        self.mem.set(self.mem.TIME_CONSTRUCTION, self.Time)
        #print(self.world)
        # empty the selected goals list
        del self.selected_goals[:]

        # load the pre-generated problem for this run and advance the counter
        with open('30_problem_set_ijcai.pickle', 'rb') as handle:
            a = pickle.load(handle)
        b = a[self.count]
        self.count = self.count + 1
        select_buildings = b[1]
        print(("NO.OF BUILDINGS TO CONSTRUCT: " + str(select_buildings)))
        # this is for the random indexes , that should be taken from the variable self.curr_goal_sets
        # compute the index list with in the range of 0 and no:of buildings
        index = b[2]
        self.curr_goal_sets = b[0]
        print("THE BUILDINGS ARE: ")
        print("[")
        for i in index:
            # since self.curr_goal_sets is in a structure of list in a list
            # we should iterate through the list completely
            print(("Tower " + self.curr_goal_sets[i][0].args[0]), end=' ')
            if (len(self.curr_goal_sets[i]) == 1):
                print(("(" + str(len(self.curr_goal_sets[i])) + " Block)"))
            else:
                print(("(" + str(len(self.curr_goal_sets[i])) + " Blocks)"))
            for j in self.curr_goal_sets[i]:
                #print(j)
                self.selected_goals.append(j)
            #print("")
        print("]")
        print("")

        return self.selected_goals
Example #13
0
    def createMIDCAObj(self):
        """Build the arsonist-scenario MIDCA instance for experiments.

        Constructs the world from the arsonist domain/state files, wires up
        the standard phase pipeline plus the arson simulator and
        TF-tree/meta-AQUA interpret modules (depending on the instance
        flags), installs a goal-comparison function that prioritizes
        'free' then 'onfire' goals, and stores the result on self.myMidca.
        Does NOT call myMidca.init() — experiment.py does that.
        """
        # in this demo, always keep extinguish to false
        extinguish = False

        # resolve paths relative to this file, not the CWD
        thisDir = os.path.dirname(
            os.path.abspath(inspect.getfile(inspect.currentframe())))

        MIDCA_ROOT = thisDir + "/../"

        domainFile = MIDCA_ROOT + "worldsim/domains/arsonist.sim"
        stateFile = MIDCA_ROOT + "worldsim/states/defstate.sim"

        self.world = domainread.load_domain(domainFile)
        stateread.apply_state_file(self.world, stateFile)
        # creates a PhaseManager object, which wraps a MIDCA object
        myMidca = base.PhaseManager(self.world,
                                    display=asqiiDisplay,
                                    verbose=4)
        #asqiiDisplay(world)
        # add phases by name
        for phase in [
                "Simulate", "Perceive", "Interpret", "Eval", "Intend", "Plan",
                "Act"
        ]:
            myMidca.append_phase(phase)

        # add the modules which instantiate basic blocksworld operation
        myMidca.append_module("Simulate", simulator.MidcaActionSimulator())
        myMidca.append_module("Simulate", simulator.ASCIIWorldViewer())
        myMidca.append_module("Perceive", perceive.PerfectObserver())
        myMidca.append_module("Interpret", note.ADistanceAnomalyNoter())
        # need to make sure to disable all user input modules #myMidca.append_module("Interpret", guide.UserGoalInput())
        myMidca.append_module("Eval", evaluate.SimpleEval())
        myMidca.append_module("Intend", intend.SimpleIntend())
        myMidca.append_module("Plan", planning.PyHopPlanner(extinguish))
        myMidca.append_module("Act", act.SimpleAct())

        # insert_module places a module at a specific index within a phase
        myMidca.insert_module(
            'Simulate',
            simulator.ArsonSimulator(arsonChance=self.arsonChanceArg,
                                     arsonStart=10), 1)
        myMidca.insert_module('Simulate', simulator.FireReset(), 0)
        myMidca.insert_module('Interpret', guide.TFStack(), 1)

        if self.usingTFTreeFire:
            myMidca.insert_module('Interpret', guide.TFFire(), 2)

        if self.usingSimulatedMA:
            myMidca.insert_module('Interpret', guide.ReactiveApprehend(), 3)

        myMidca.insert_module(
            'Eval', evaluate.Scorer(),
            1)  # this needs to be a 1 so that Scorer happens AFTER SimpleEval

        def preferApprehend(goal1, goal2):
            # goal ordering: 'free' (apprehend) goals first, then 'onfire'
            # goals, then everything else; 0 means no preference
            if 'predicate' not in goal1 or 'predicate' not in goal2:
                return 0
            elif goal1['predicate'] == 'free' and goal2['predicate'] != 'free':
                return -1
            elif goal1['predicate'] != 'free' and goal2['predicate'] == 'free':
                return 1
            elif goal1['predicate'] == 'onfire' and goal2[
                    'predicate'] != 'onfire':
                return -1
            elif goal1['predicate'] != 'onfire' and goal2[
                    'predicate'] == 'onfire':
                return 1
            return 0

        # tells the PhaseManager to copy and store MIDCA states so they can be accessed later.
        myMidca.storeHistory = False
        myMidca.initGoalGraph(cmpFunc=preferApprehend)
        ## DO NOT DO THIS: experiment.py will do this automatically: myMidca.init()

        print("Created MIDCA " + str(id(myMidca)) + " w/ arsonchance=" +
              str(self.arsonChanceArg) + ", usingTFTreeFire=" +
              str(self.usingTFTreeFire) + ",usingSimMA=" +
              str(self.usingSimulatedMA))

        self.myMidca = myMidca
        self.initialized = True
Example #14
0
# Resolve the directory containing this script so relative paths work
# regardless of the current working directory.
thisDir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))

MIDCA_ROOT = thisDir + "/../"

# Arsonist/extinguisher blocksworld scenario files.
domainFile = MIDCA_ROOT + "domains/blocksworld/domains/arsonist_extinguish.sim"
stateFile = MIDCA_ROOT + "domains/blocksworld/states/extinguisher_state.sim"
extinguish = True

# Positional arguments forwarded to PyHopPlanner below.
argsPyHopPlanner = [util.pyhop_state_from_world,
					util.pyhop_tasks_from_goals,
					DECLARE_METHODS_FUNC,
					DECLARE_OPERATORS_FUNC,
					extinguish]

world = domainread.load_domain(domainFile)
stateread.apply_state_file(world, stateFile)
#creates a PhaseManager object, which wraps a MIDCA object
myMidca = base.PhaseManager(world, display = util.asqiiDisplay, verbose=4)
#add phases by name
for phase in ["Simulate", "Perceive", "Interpret", "Eval", "Intend", "Plan", "Act"]:
	myMidca.append_phase(phase)

#add the modules which instantiate basic blocksworld operation
myMidca.append_module("Simulate", simulator.MidcaActionSimulator())
myMidca.append_module("Simulate", simulator.ASCIIWorldViewer())
myMidca.append_module("Perceive", perceive.PerfectObserver())
myMidca.append_module("Interpret", note.ADistanceAnomalyNoter())
#myMidca.append_module("Interpret", guide.UserGoalInput())
myMidca.append_module("Eval", evaluate.SimpleEval())
myMidca.append_module("Intend", intend.SimpleIntend())
# NOTE(review): no "Act" module is appended — snippet appears truncated; confirm.
myMidca.append_module("Plan", planning.PyHopPlanner(*argsPyHopPlanner))
GOAL_GRAPH_CMP_FUNC = util.preferApprehend

extinguish = False
mortar = True
world = domainread.load_domain(DOMAIN_FILE)

# For the state file we need to add a number of mortar blocks to begin with.
# Read the base state with a context manager so the handle is closed promptly
# (the old bare open().read() leaked the file handle).
with open(STATE_FILE) as state_file:
    state_str = state_file.read()
# now add new mortar blocks
for i in range(MORTAR_COUNT):
    state_str += "MORTARBLOCK(M" + str(i) + ")\n"
    state_str += "available(M" + str(i) + ")\n"
# now load the state
stateread.apply_state_str(world, state_str)

stateread.apply_state_file(world, STATE_FILE)
# creates a PhaseManager object, which wraps a MIDCA object
myMidca = base.PhaseManager(world, display = DISPLAY_FUNC, verbose=4, metaEnabled=True)


# add phases by name
for phase in ["Simulate", "Perceive", "Interpret", "Eval", "Intend", "Plan", "Act"]:
    myMidca.append_phase(phase)

# add the modules which instantiate basic blocksworld operation
myMidca.append_module("Simulate", simulator.MidcaActionSimulator())
myMidca.append_module("Simulate", simulator.ASCIIWorldViewer(display=DISPLAY_FUNC))
myMidca.append_module("Perceive", perceive.PerfectObserver())
myMidca.append_module("Interpret", note.ADistanceAnomalyNoter())
#myMidca.append_module("Interpret", guide.UserGoalInput())
myMidca.append_module("Eval", evaluate.SimpleEval())
Example #16
0
# BUG FIX: the original used Python 2 print statements (`print thisDir`),
# which are syntax errors under Python 3 — the rest of this file is Python 3.
print(thisDir)

MIDCA_ROOT = thisDir + "/../"
print(MIDCA_ROOT)

### Domain Specific Variables
DOMAIN_ROOT = MIDCA_ROOT + "domains/logistics/"
DOMAIN_FILE = DOMAIN_ROOT + "domains/domain2.sim"
STATE_FILE = DOMAIN_ROOT + "states/defstate2.sim"

### Domain Specific Variables for JSHOP planner
JSHOP_DOMAIN_FILE = MIDCA_ROOT + "domains/jshop_domains/logistics/logistics"
JSHOP_STATE_FILE = MIDCA_ROOT + "domains/jshop_domains/logistics/problem"

world = domainread.load_domain(DOMAIN_FILE)
stateread.apply_state_file(world, STATE_FILE)
#creates a PhaseManager object, which wraps a MIDCA object
myMidca = base.PhaseManager(world, display='', verbose=4)
#add phases by name
for phase in [
        "Simulate", "Perceive", "Interpret", "Eval", "Intend", "Plan", "Act"
]:
    myMidca.append_phase(phase)

#add the modules which instantiate basic blocksworld operation
myMidca.append_module("Simulate", simulator.MidcaActionSimulator())
#myMidca.append_module("Simulate", simulator.ASCIIWorldViewer(display=DISPLAY_FUNC))
myMidca.append_module("Perceive", perceive.PerfectObserverWithThief())
myMidca.append_module("Interpret", guide.DeliverGoal())
#myMidca.append_module("Interpret", guide.UserGoalInput())
myMidca.append_module("Eval", evaluate.SimpleEval2())
# --- Example #17 (score: 0) ---
    def createMIDCAObj(self):
        """Build and store the MIDCA instance for this experiment run.

        Loads the arsonist domain and default state, assembles the standard
        phase pipeline, inserts the arson/fire interpretation modules
        according to the experiment flags (``usingTFTreeFire``,
        ``usingSimulatedMA``), and records the result on ``self.myMidca``.
        Sets ``self.initialized`` to True on completion.
        """
        # In this demo, always keep extinguish to false.
        extinguish = False

        # Resolve paths relative to this file's directory.
        thisDir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
        MIDCA_ROOT = thisDir + "/../"

        domainFile = MIDCA_ROOT + "worldsim/domains/arsonist.sim"
        stateFile = MIDCA_ROOT + "worldsim/states/defstate.sim"

        self.world = domainread.load_domain(domainFile)
        stateread.apply_state_file(self.world, stateFile)
        # Creates a PhaseManager object, which wraps a MIDCA object.
        myMidca = base.PhaseManager(self.world, display=asqiiDisplay, verbose=4)
        # asqiiDisplay(world)
        # Add phases by name.
        for phase in ["Simulate", "Perceive", "Interpret", "Eval", "Intend", "Plan", "Act"]:
            myMidca.append_phase(phase)

        # Add the modules which instantiate basic blocksworld operation.
        myMidca.append_module("Simulate", simulator.MidcaActionSimulator())
        myMidca.append_module("Simulate", simulator.ASCIIWorldViewer())
        myMidca.append_module("Perceive", perceive.PerfectObserver())
        myMidca.append_module("Interpret", note.ADistanceAnomalyNoter())
        # Need to make sure to disable all user input modules:
        # myMidca.append_module("Interpret", guide.UserGoalInput())
        myMidca.append_module("Eval", evaluate.SimpleEval())
        myMidca.append_module("Intend", intend.SimpleIntend())
        myMidca.append_module("Plan", planning.PyHopPlanner(extinguish))
        myMidca.append_module("Act", act.SimpleAct())

        myMidca.insert_module('Simulate', simulator.ArsonSimulator(arsonChance=self.arsonChanceArg, arsonStart=10), 1)
        myMidca.insert_module('Simulate', simulator.FireReset(), 0)
        myMidca.insert_module('Interpret', guide.TFStack(), 1)

        if self.usingTFTreeFire:
            myMidca.insert_module('Interpret', guide.TFFire(), 2)

        if self.usingSimulatedMA:
            myMidca.insert_module('Interpret', guide.ReactiveApprehend(), 3)

        # Position 1 so that Scorer happens AFTER SimpleEval.
        myMidca.insert_module('Eval', evaluate.Scorer(), 1)

        def preferApprehend(goal1, goal2):
            """Goal-graph comparator: 'free' goals sort first, then 'onfire'."""
            if 'predicate' not in goal1 or 'predicate' not in goal2:
                return 0
            elif goal1['predicate'] == 'free' and goal2['predicate'] != 'free':
                return -1
            elif goal1['predicate'] != 'free' and goal2['predicate'] == 'free':
                return 1
            elif goal1['predicate'] == 'onfire' and goal2['predicate'] != 'onfire':
                return -1
            elif goal1['predicate'] != 'onfire' and goal2['predicate'] == 'onfire':
                return 1
            return 0

        # History storage is DISABLED here: MIDCA states are not copied and
        # stored for later access (set storeHistory = True to enable that).
        myMidca.storeHistory = False
        myMidca.initGoalGraph(cmpFunc=preferApprehend)
        # DO NOT call myMidca.init() here: experiment.py does it automatically.

        # Parenthesized single-argument print works under both Py2 and Py3.
        print("Created MIDCA " + str(id(myMidca)) + " w/ arsonchance=" + str(self.arsonChanceArg) + ", usingTFTreeFire=" + str(self.usingTFTreeFire) + ",usingSimMA=" + str(self.usingSimulatedMA))

        self.myMidca = myMidca
        self.initialized = True