def __init__(self, args):
    """Configure the gradient operator from args = [beam, N, boxlen].

    beam: FWHM of the smoothing beam (may be None for no smoothing).
    N: grid size (pixels per side).
    boxlen: physical box length.
    """
    self.beam = args[0]
    # Treat a missing beam as zero width (no smoothing).
    if self.beam is None:
        self.beam = 0.0
    self.N = args[1]
    self.boxlen = args[2]
    # Convert FWHM to Gaussian sigma.
    self.sigma = self.beam / (sqrt(8.0 * log(2)))
    # Beam width expressed in pixels; clamp to at least one pixel.
    self.pixelwidth = max(int(self.N * self.sigma / (self.boxlen)), 1)
    # Ring of pixel offsets at which finite differences are taken.
    self.scale = self.__CalculateScales()
    self.path = './'
    # Observer instance used for writing results to disk.
    self.Writer = Observer([None, self.N, self.boxlen, './'])
def setUp(self):
    """Build a fresh utility, observable, and two named observers for each test."""
    self.ut = obs.ObsUtility()
    self.myObservable = obs.Observable()
    self.name1 = "myObserver1"
    self.name2 = "myObserver2"
    self.myObserver1 = obs.Observer(self.name1)
    self.myObserver2 = obs.Observer(self.name2)
def __init__(self, name, remotes=None):
    """Build the robot's observer/thinker/actor stack.

    name: robot identifier, passed to every sub-component.
    remotes: optional dict mapping component name ('observer', 'thinker',
        'actor') to a Pyro4 URI; present entries replace the matching local
        component with a remote proxy.
    """
    self.name = name
    self.remotes = remotes
    self.state = 0
    self.status = "running"
    log("Initializing robot", self.name)
    # Local components; any of them may be swapped for a Pyro proxy below.
    self.observer = Observer(self.name)
    self.thinker = Thinker(self.name)
    self.actor = Actor(self.name)
    self.session_done = False
    self.vague = False
    self.actions = []
    self.rewards = []
    self.cont_reward = []
    if self.remotes:
        # BUG FIX: the original built each Proxy inside the *except* branch
        # of a bare try on the key lookup, so a present key did nothing and
        # a missing key raised KeyError twice. Use the proxy exactly when
        # the remote URI is provided.
        if 'observer' in self.remotes:
            self.observer = Pyro4.Proxy(self.remotes['observer'])
        if 'thinker' in self.remotes:
            self.thinker = Pyro4.Proxy(self.remotes['thinker'])
        if 'actor' in self.remotes:
            self.actor = Pyro4.Proxy(self.remotes['actor'])
    self.observer.initialize()
    self.observer.register_device('microphone', Microphone(self.name))
    self.observer.register_device('camera', Camera(self.name))
    self.actor.register_device('speaker', Speaker(self.name))
    self.actor.register_device('eyes', Eyes(self.name))
def testObserver(self):
    """Observer: construction, str(), and update() input validation.

    Checks for Exception if strange input.
    """
    # Normal behavior: name is stored and used as the string form.
    # FIX: assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual.
    self.assertEqual(self.myObserver1.name, self.name1)
    self.assertEqual(str(self.myObserver1), self.name1)
    # Non-string names are rejected.
    self.assertRaises(TypeError, lambda: obs.Observer(42))
    # tests message and update
    self.assertEqual(self.myObserver1.message, None)
    new_message = '{"name": "Bob","group": "","type": "","data": 42}'
    self.myObserver1.update(new_message)
    self.assertEqual(self.myObserver1.message, new_message)
    # Non-string payloads are rejected.
    self.assertRaises(TypeError, lambda: self.myObserver1.update([4, 2]))
    self.assertRaises(TypeError, lambda: self.myObserver1.update(None))
def Activated(self):
    """Create a demo box, fit the view, and hook a mouse-button observer."""
    import Part
    doc = FreeCAD.activeDocument()
    box_length, box_width, box_height = 10, 20, 40
    demo_box = Part.makeBox(box_length, box_width, box_height)
    shape_obj = doc.addObject("Part::Feature", "MyShape")
    shape_obj.Shape = demo_box
    doc.recompute()
    FreeCADGui.SendMsgToActiveView("ViewFit")

    import Observer
    active_view = FreeCADGui.activeDocument().activeView()
    view_observer = Observer.ViewObserver()
    c = active_view.addEventCallback("SoMouseButtonEvent",
                                     view_observer.logPosition)
import Observer
import Subject

if __name__ == "__main__":
    # Exercise the weather station in both push and pull notification modes.
    weather_data = Subject.WeatherData()
    current_display = Observer.CurrentConditionsDisplay()
    statistics_display = Observer.StatisticsDisplay()
    forecast_display = Observer.ForecastDisplay()
    displays = (current_display, statistics_display, forecast_display)
    readings = ((80, 65, 30.4), (82, 70, 29.2), (78, 90, 29.2))

    print('---- Push mode ----')
    for display in displays:
        weather_data.registerPushObserver(display)
    for temperature, humidity, pressure in readings:
        weather_data.simulateChange(temperature, humidity, pressure)
    for display in displays:
        weather_data.removePushObserver(display)

    print('---- Pull mode ----')
    for display in displays:
        weather_data.registerPullObserver(display)
    for temperature, humidity, pressure in readings:
        weather_data.simulateChange(temperature, humidity, pressure)
    weather_data.removePullObserver(current_display)
receiver_rewards.append( [receiver_reward_values[option] for option in action if action != []]) # Convert to numpy arrays. actions = np.array(actions) actor_rewards = np.array(actor_rewards) actor_beliefs = np.array(actor_beliefs) receiver_rewards = np.array(receiver_rewards) # Instantiate a state, actor, and observer. state = State(actions=actions, actor_rewards=actor_rewards, actor_beliefs=actor_beliefs, receiver_rewards=receiver_rewards) actor = Actor() observer = Observer(state, actor) print("Actions:") print(actions) index = 0 action = actions[index] print("Action:") print(action) # Run the sacrifice model. print(observer.sacrifice(action)) # Run the utilitarian model. print(observer.utilitarian(action, receiver_reward_values)) # Run the ToM model.
# open top left, go right, and go down #OpenDrawers = ['m0-0','m0-1','m1-1'] # top left, middle left OpenDrawers = ['m0-0', 'm1-0'] # memory test #OpenDrawers = ['m0-0','m0-1','m2-2'] # open first row #OpenDrawers = ['m0-0','m0-1', 'm0-2'] # open two drawers in the middle #OpenDrawers = ['m2-3','m2-2'] # Part 2: Load model and run inference sys.stdout.write("Loading model and policy...\n") Observer = Observer.Observer(WorldModel, AgentModel, OpenDrawers, DrawerDimensions) #Observer = Observer.Observer(WorldModel, AgentModel, OpenDrawers, DrawerDimensions, Colors) Observer.load() sys.stdout.write("Inferring knowledge...\n") Observer.InferKnowledge(Rationality) # Part 3: Process posterior distribution and visualize Results = Observer.ProcessPosterior(rounded=True) Observer.PrintPosterior() #Observer.PlotPosterior(Results, Title='_'.join(OpenDrawers))
RECV_BUFFER = 1024 # Advisable to keep it as an exponent of 2 PORT = 8888 MAXROOMCOUNT = 10 server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) # this has no effect, why ? server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1) server_socket.bind(("0.0.0.0", PORT)) server_socket.listen(5) # Add server socket to the list of readable connections CONNECTION_LIST.append(server_socket) logger.info("Socket server started on port " + str(PORT)) observer = Observer.WebSocket() observer.start() ipCheck = IpCheck.Manager() ipCheck.logger = logger interruptList = [] try: while 1: # Get the list sockets which are ready to be read through select read_sockets,write_sockets,error_sockets = select.select(CONNECTION_LIST,[],[]) for sock in read_sockets: #New connection if sock == server_socket: # Handle the case in which there is a new connection recieved through server_socket sockfd, addr = server_socket.accept()
import Observer

# Tiny state-machine demo followed by a two-observer exercise.
myFSM = "Off"
myFSM = "On"  # reassignment makes the "Off" branch below unreachable
if myFSM == "Off":
    print("Off")
elif myFSM == "On":
    print("On")
else:
    print("Neither")
print("myStrat")

n = 3
obs1 = Observer.Observer()
obs2 = Observer.Observer()
observerList = [obs1]
# FIX: insert(len(list), x) is just append(x).
observerList.append(obs2)
# FIX: iterate the list directly instead of indexing with range(len(...)).
for observer in observerList:
    observer.printNum()
obs1.setX(10)
print(obs1.getX(), end='')
class Robot:
    """Observer/Thinker/Actor composite with optional Pyro4 remote components.

    The robot observes through registered input devices, chooses actions via
    the thinker, and performs them via output devices on the actor.
    """

    def __init__(self, name, remotes=None):
        """Build the component stack.

        name: robot identifier, passed to every sub-component.
        remotes: optional dict mapping 'observer'/'thinker'/'actor' to a
            Pyro4 URI; present entries replace the local component with a
            remote proxy.
        """
        self.name = name
        self.remotes = remotes
        self.state = 0
        self.status = "running"
        log("Initializing robot", self.name)
        # Local components; any may be swapped for a Pyro proxy below.
        self.observer = Observer(self.name)
        self.thinker = Thinker(self.name)
        self.actor = Actor(self.name)
        self.session_done = False
        self.vague = False
        self.actions = []
        self.rewards = []
        self.cont_reward = []
        if self.remotes:
            # BUG FIX: the original built each Proxy inside the *except*
            # branch of a bare try on the key lookup, so a present key did
            # nothing and a missing key raised KeyError twice. Use the proxy
            # exactly when the remote URI is provided.
            if 'observer' in self.remotes:
                self.observer = Pyro4.Proxy(self.remotes['observer'])
            if 'thinker' in self.remotes:
                self.thinker = Pyro4.Proxy(self.remotes['thinker'])
            if 'actor' in self.remotes:
                self.actor = Pyro4.Proxy(self.remotes['actor'])
        self.observer.initialize()
        self.observer.register_device('microphone', Microphone(self.name))
        self.observer.register_device('camera', Camera(self.name))
        self.actor.register_device('speaker', Speaker(self.name))
        self.actor.register_device('eyes', Eyes(self.name))

    def observe(self):
        """Take one observation from all registered input devices."""
        self.observer.observe()

    def think(self, i):
        """Choose an action for observation `i`, record it, and return it."""
        action = self.thinker.think(i)
        self.actions.append(action)
        return action

    def act(self, action):
        """Queue `action` on both output devices and perform it."""
        self.actor.output_space['speaker'].append(action)
        self.actor.output_space['eyes'].append(action)
        self.actor.act()
        self.state = self.thinker.newest()

    def check_status(self):
        """Update session status from the latest microphone observation."""
        obs = self.observer.newest()
        if any(word in obs['microphone']
               for word in ["stop", "kill", "nine nine", "satisfied"]):
            os.system("mpg123 -q understood.mp3")
            self.status = "session done"
            self.session_done = True
        elif any(word in obs['microphone']
                 for word in ["thanks", "ok", "okay", "ready", "ha", "haha",
                              "hahaha"]):
            os.system("mpg123 -q understood.mp3")
            self.status = "step done"
        elif not self.vague:
            # Unrecognized input: signal confusion, but only once.
            os.system("mpg123 -q not_understood.mp3")
            self.actor.devices['eyes'].act('vagueleft')
            self.vague = True

    def ta(self):
        """One think-act cycle over the newest observation."""
        observations = self.observer.newest()
        self.actor.devices['eyes'].act('closed')
        time.sleep(2)
        self.think(observations)
        actions = self.thinker.newest()
        self.actor.devices['eyes'].act('amused')
        self.act(actions)
        self.actor.devices['eyes'].act('neutral')

    def tao(self):
        """Think-act, then observe until the step or session completes."""
        self.actor.devices['eyes'].act('neutral')
        self.ta()
        done = False
        while not done:
            self.observe()
            self.check_status()
            if self.status in ["step done", "session done"]:
                # Camera reading is used as the feedback/reward signal.
                feedback = self.observer.newest()['camera']
                self.thinker.feedback(feedback)
                self.rewards.append(feedback)
                done = True
                self.status = "running"  # Reset the robot status

    def ask_feedback(self):
        """Ask for satisfaction; on 'yes', replay rewards to the thinker."""
        self.actor.devices['speaker'].act(
            "Are you satisfied with the " + self.name + " experience?")
        self.observe()
        if self.observer.newest()['microphone'] == "yes":
            # FIX: replaced the manual counter/range(len(...)) loop with
            # enumerate+zip; the (old_state, new_state) pairs are unchanged.
            for state, (action, reward) in enumerate(
                    zip(self.actions, self.rewards)):
                self.thinker.last_feedback(state, state + 1, action, reward)
        self.actor.devices['eyes'].act('amused')
        self.actor.devices['speaker'].act("Thank you, come again!")
def SelSurf(self):
    """Register a new SelectionObserver with the FreeCAD selection manager."""
    selection_observer = Observer.SelectionObserver()
    FreeCADGui.Selection.addObserver(selection_observer)
from Observer import *
from Agent import *
from Event import *

# Demo: theory-of-mind inference about how selfish vs. altruistic an agent is.
# An Event bundles the choice options, the agent's own value for each option,
# the agent's beliefs, and the reward each option gives the recipient.
MyEvent = Event(options=[0, 1],
                agentvalue=[2.5, 2],
                agentbeliefs=[9, 3],
                recipientrewards=[4, 8])
MyAgent = Agent()
# Create an observer with an event and a mental model of the agent
MyObserver = Observer(MyEvent, MyAgent)
MyObserver.ToM(
    1
)  # Infers that agent really cares about own utilities and not about others
MyObserver.ToM(0)  # Uncertain.

# Same as above, but now make agent value option 0 a lot
MyEvent = Event(options=[0, 1],
                agentvalue=[10, 2],
                agentbeliefs=[9, 3],
                recipientrewards=[4, 8])
MyAgent = Agent()
MyObserver = Observer(MyEvent, MyAgent)
MyObserver.ToM(
    1
)  # In contrast to above, now less certain that agent is selfish (because option 0 is very valuable)
MyObserver.ToM(0)  # Should infer low selfishness and high altruism
from Belief import *
from Agent import *
from Observer import *

# Experiment 7a
# Two options and neither have a cost.
Costs = [0, 1, 2]
Rewards = [-3, -2, -1, 1, 2, 3]
# Flat (uniform) prior weights over the cost and reward supports.
PC = [1, 1, 1]
PR = [1, 1, 1, 1, 1, 1]

# Create observer for naive agent who know what she likes.
# Fix the cost values and let rewards fluctuate across agents.
Obs_Naive = Observer(Belief(Costs, PC, 0), Belief(Rewards, PR, 1),
                     Belief(Costs, PC, 2), Belief(Rewards, PR, 1),
                     [0, 1, 0, 1], [1, 0, 1, 0])
# Create observer for knowledgeable agent who knows both costs and rewards.
# Fix costs and let rewards fluctuate
Obs_Knowledgeable = Observer(Belief(Costs, PC, 0), Belief(Rewards, PR, 1),
                             Belief(Costs, PC, 0), Belief(Rewards, PR, 1),
                             [1, 1, 1, 1], [1, 0, 1, 0])

# Observe each agent choosing option 1; recover likelihood, samples, weights.
[LAgent_Naive, Samples_Naive,
 Probabilities_Naive] = Obs_Naive.ObserveAction(1)
[LAgent_Knowledgeable, Samples_Knowledgeable,
 Probabilities_Knowledgeable] = Obs_Knowledgeable.ObserveAction(1)
# Take the probability of each sample producing a choice change.
# NOTE(review): excerpt is truncated below — the summation expression
# continues past this view.
p_Naive = sum([
    Samples_Naive[i].ChoiceChange() * Probabilities_Naive[i]
def __init__(self, difficulty):
    """Create the Dame game model and attach a print-adapter observer."""
    self.__dame = Dame.Dame(difficulty)
    printing_observer = Observer.Observer(self, AdapterPrint.AdapterPrint())
    self.__dame.add_observer(printing_observer)
import json
import os
import Observer


class Folder_Organize():
    """Reads Extensions.json and derives download-folder settings."""

    def create_folders(self):
        # Load (and possibly update) the JSON config in place.
        with open("Extensions.json", "r+") as jsonfile:
            data = json.load(jsonfile)
            # set download folder
            if data["download_dir"] == "":
                # Rewrite the file from the start with the user-supplied path.
                jsonfile.seek(0)
                jsonfile.truncate()
                # download directory
                dl_dir = input("Enter your download folder path : ")
                data["download_dir"] = dl_dir
                json.dump(data, jsonfile, indent=2)
            # variables
            self.dl_dir = data["download_dir"]
            # NOTE(review): the comprehension reuses `directory` as both loop
            # variables, so the inner loop iterates over each element of the
            # outer one — presumably flattening data["filetypes"]; confirm
            # the intended shape of "filetypes".
            self.directories = [
                directory for directory in data["filetypes"]
                for directory in directory
            ]
            self.filetypes = data["filetypes"]


folders = Folder_Organize()
folders.create_folders()
Observer.run_handler(folders.dl_dir, folders.filetypes)
def LoadObserver(MapConfig, Revise=False, Silent=False): """ Load a map. If map isn't found in Bishop's library the function searches for the map in your working directory. Args: MapConfig (str): Name of map to load Revise (bool): When true, user manually confirms or overrides parameters. Silent (bool): If false then function doesn't print map. Returns: Observer object """ try: Local = False if Revise: sys.stdout.write( "\nPress enter to accept the argument or type in the new value to replace it.\n\n" ) Config = ConfigParser.ConfigParser() FilePath = os.path.dirname(__file__) + "/Maps/" FilePath = LocateFile(FilePath, MapConfig + ".ini") if FilePath is not None: FilePath = FilePath + "/" + MapConfig + ".ini" ######################### ## Load .ini map first ## ######################### else: FilePath = MapConfig + ".ini" Local = True if not os.path.isfile(FilePath): print("ERROR: Map not found.") return None Config.read(FilePath) except Exception as error: print(error) # Agent parameter section ######################### if not Config.has_section("AgentParameters"): print("ERROR: AgentParameters block missing.") return None if Config.has_option("AgentParameters", "Method"): temp = Config.get("AgentParameters", "Method") if temp == 'Linear' or temp == 'Rate': Method = temp else: if not Silent: if temp == 'Discount': print( "Discount method is now integrated with the linear utility method (2.6+). Use organic markers to mark discounts." ) else: print( "ERROR: Unknown utility type. Using a linear utility function." ) Method = "Linear" else: if not Silent: print( "Using a linear utility function (Add a Method in the AgentParameters block to change to 'Rate' utilities)." ) Method = "Linear" if Revise: temp = raw_input("Utility type (Rate or Linear. Current=" + str(Method) + "):") if temp != '': if temp == 'Linear' or temp == 'Rate': Method = temp else: print("Not valid. 
Setting Method to Linear") Method = "Linear" if Config.has_option("AgentParameters", "Prior"): CostPrior = Config.get("AgentParameters", "Prior") RewardPrior = CostPrior if Revise: temp = raw_input("CostPrior (" + str(CostPrior) + "):") if temp != '': CostPrior = str(temp) temp = raw_input("RewardPrior (" + str(RewardPrior) + "):") if temp != '': RewardPrior = str(temp) else: if Config.has_option("AgentParameters", "CostPrior"): CostPrior = Config.get("AgentParameters", "CostPrior") if Revise: temp = raw_input("CostPrior (" + str(CostPrior) + "):") if temp != '': CostPrior = str(temp) else: print( "WARNING: No cost prior specified in AgentParameters. Use Agent.Priors() to see list of priors" ) return None if Config.has_option("AgentParameters", "RewardPrior"): RewardPrior = Config.get("AgentParameters", "RewardPrior") if Revise: temp = raw_input("RewardPrior (" + str(RewardPrior) + "):") if temp != '': RewardPrior = str(temp) else: print( "WARNING: No reward prior specified in AgentParameters. 
Use Agent.Priors() to see list of priors" ) return None if Config.has_option("AgentParameters", "Minimum"): Minimum = Config.getint("AgentParameters", "Minimum") else: Minimum = 0 if Revise: temp = raw_input("Minimum objects to collect (" + str(Minimum) + "):") if temp != '': Minimum = int(temp) if Config.has_option("AgentParameters", "Capacity"): Capacity = Config.getint("AgentParameters", "Capacity") else: Capacity = -1 if Revise: temp = raw_input("Agent capacity (" + str(Capacity) + "; -1 = unlimited):") if temp != '': Capacity = int(temp) if Capacity != -1 and Minimum > Capacity: sys.stdout.write( "ERROR: Agent's minimum number of elements exceed capacity.") return None if Config.has_option("AgentParameters", "Restrict"): Restrict = Config.getboolean("AgentParameters", "Restrict") else: if not Silent: print( "Setting restrict to false (i.e., uncertainty over which terrain is the easiest)" ) Restrict = False if Config.has_option("AgentParameters", "SoftmaxChoice"): SoftmaxChoice = Config.getboolean("AgentParameters", "SoftmaxChoice") else: print("Softmaxing choices") SoftmaxChoice = True if Revise: temp = raw_input("Softmax choices (" + str(SoftmaxChoice) + "):") if temp != '': if temp == 'True': SoftmaxChoice = True elif temp == 'False': SoftmaxChoice = False else: sys.stdout.write("Not a valid choice. Ignoring.\n") if Config.has_option("AgentParameters", "SoftmaxAction"): SoftmaxAction = Config.getboolean("AgentParameters", "SoftmaxAction") else: print("Softmaxing actions") SoftmaxAction = True if Revise: temp = raw_input("Softmax actions (" + str(SoftmaxAction) + "):") if temp != '': if temp == 'True': SoftmaxAction = True elif temp == 'False': SoftmaxAction = False else: sys.stdout.write("Not a valid choice. Ignoring.\n") if Config.has_option("AgentParameters", "choiceTau"): choiceTau = Config.getfloat("AgentParameters", "choiceTau") else: if SoftmaxChoice: print("Setting choice softmax to 0.01") choiceTau = 0.01 else: # Doesn't matter; won't be used. 
choiceTau = 0 if (Revise and SoftmaxChoice): temp = raw_input("Choice tau (" + str(choiceTau) + "):") if temp != '': choiceTau = float(temp) if Config.has_option("AgentParameters", "actionTau"): actionTau = Config.getfloat("AgentParameters", "actionTau") else: if SoftmaxAction: print("Setting action softmax to 0.01") actionTau = 0.01 else: # Doesn't matter; won't be used. actionTau = 0 if (Revise and SoftmaxChoice): temp = raw_input("Action tau (" + str(actionTau) + "):") if temp != '': actionTau = float(temp) if Config.has_option("AgentParameters", "CostParameters"): CostParameters = Config.get("AgentParameters", "CostParameters") if Revise: temp = raw_input("Cost parameters (" + str(CostParameters) + "):") if temp != '': CostParameters = temp CostParameters = CostParameters.split() CostParameters = [float(i) for i in CostParameters] else: print( "ERROR: Missing cost parameters for prior sampling in AgentParameters block." ) return None if Config.has_option("AgentParameters", "RewardParameters"): RewardParameters = Config.get("AgentParameters", "RewardParameters") if Revise: temp = raw_input("Reward parameters (" + str(RewardParameters) + "):") if temp != '': RewardParameters = temp RewardParameters = [float(i) for i in RewardParameters.split()] else: print( "ERROR: Missing cost parameters for prior sampling in AgentParameters block." ) return None if Config.has_option("AgentParameters", "PNull"): CNull = Config.getfloat("AgentParameters", "PNull") RNull = CNull else: if Config.has_option("AgentParameters", "CNull"): CNull = Config.getfloat("AgentParameters", "CNull") else: print( "WARNING: No probability of terrains having null cost. Setting to 0." ) CNull = 0 if Config.has_option("AgentParameters", "RNull"): RNull = Config.getfloat("AgentParameters", "RNull") else: print( "WARNING: No probability of terrains having null cost. Setting to 0." 
) RNull = 0 if Revise: temp = raw_input("Null cost paramter (" + str(CNull) + "):") if temp != '': CNull = float(temp) temp = raw_input("Null reward paramter (" + str(RNull) + "):") if temp != '': RNull = float(temp) # Map parameter section ####################### if not Config.has_section("MapParameters"): print("ERROR: MapParameters block missing.") return None if Config.has_option("MapParameters", "DiagonalTravel"): DiagonalTravel = Config.getboolean("MapParameters", "DiagonalTravel") else: print("Allowing diagonal travel") DiagonalTravel = True if Revise: temp = raw_input("Diagonal travel (" + str(DiagonalTravel) + "):") if temp != '': if temp == "True": DiagonalTravel = True elif temp == "False": DiagonalTravel = False else: sys.stdout.write("Not a valid choice. Ignoring.\n") if Config.has_option("MapParameters", "StartingPoint"): StartingPoint = Config.getint("MapParameters", "StartingPoint") else: print("ERROR: Missing starting point in MapParameters block.") return None if Revise: temp = raw_input("Starting point (" + str(StartingPoint) + "):") if temp != '': StartingPoint = int(temp) if Config.has_option("MapParameters", "ExitState"): ExitState = Config.getint("MapParameters", "ExitState") else: print("ERROR: Missing exit state in MapParameters block.") return None if Revise: temp = raw_input("Exit state (" + str(ExitState) + "):") if temp != '': ExitState = int(temp) try: if Config.has_option("MapParameters", "MapName"): MapName = Config.get("MapParameters", "MapName") if not Local: TerrainPath = os.path.dirname(__file__) + "/Maps/" TerrainPath = os.path.join(LocateFile(TerrainPath, MapName), MapName) else: TerrainPath = MapName f = open(TerrainPath, "r") MapLoad = True StateTypes = [] StateNames = [] mapheight = 0 for line in iter(f): if MapLoad: states = [int(i) for i in list(line.rstrip())] if states == []: MapLoad = False else: mapheight += 1 StateTypes.extend(states) else: statename = line.rstrip() if statename != "": StateNames.append(statename) 
f.close() mapwidth = len(StateTypes) / mapheight else: print("ERROR: Missing map name") return None except: print("ERROR: Cannot load map layout.") # raise return None # Load object information ######################### if not Config.has_section("Objects"): print("ERROR: Objects block missing.") return None else: if Config.has_option("Objects", "ObjectLocations"): ObjectLocations = Config.get("Objects", "ObjectLocations") ObjectLocations = [int(i) for i in ObjectLocations.split()] HasObjects = True else: print( "WARNING: No objects in map (Agent will always go straight home)." ) HasObjects = False if HasObjects: if Config.has_option("Objects", "ObjectTypes"): ObjectTypes = Config.get("Objects", "ObjectTypes") ObjectTypes = [int(i) for i in ObjectTypes.split()] if len(ObjectTypes) != len(ObjectLocations): print( "Error: ObjectLocations and ObjectTypes should have the same length" ) return None else: print( "WARNING: No information about object types. Setting all to same kind." ) ObjectTypes = [0] * len(ObjectLocations) if Config.has_option("Objects", "ObjectNames"): ObjectNames = Config.get("Objects", "ObjectNames") ObjectNames = [str(i) for i in ObjectNames.split()] else: ObjectNames = None if Config.has_option("Objects", "Organic"): Organic = Config.get("Objects", "Organic") Organic = [bool(int(i)) for i in Organic.split()] else: if not Silent: print( "No organic markers. Treating all objects as dead. Add an Organic line to mark if some object types are agents (add probability of death)." ) Organic = [False] * len(ObjectTypes) if Config.has_option("Objects", "SurvivalProb"): SurvivalProb = Config.getfloat("Objects", "SurvivalProb") if sum(Organic) == 0: print( "You specified a survival probability, but there are no organic objects. Model will work but maybe you specified the map incorrectly." 
) if Revise: temp = raw_input("Survival probability (" + str(SurvivalProb) + "):") if temp != '': SurvivalProb = float(temp) else: if sum(Organic) > 0: if Revise: temp = raw_input( "Survival probability (between 0 and 1):") if temp != '': SurvivalProb = float(temp) else: print( "Map has organic objects but survival probability not specified. Setting to 0.95; change this by adding a Survival parameter on the Objects block." ) SurvivalProb = 0.95 else: # Just to fit in with Planner constructor. SurvivalProb = 1 else: ObjectTypes = [] ObjectNames = None # Create objects! try: MyMap = Map() MyMap.BuildGridWorld(mapwidth, mapheight, DiagonalTravel) MyMap.InsertObjects(ObjectLocations, ObjectTypes, Organic, ObjectNames, SurvivalProb) MyMap.StateTypes = StateTypes MyMap.StateNames = StateNames MyMap.AddStartingPoint(StartingPoint) MyMap.AddExitState(ExitState) if not Silent: sys.stdout.write("\n") MyMap.PrintMap() MyAgent = Agent(MyMap, CostPrior, RewardPrior, CostParameters, RewardParameters, Capacity, Minimum, SoftmaxChoice, SoftmaxAction, choiceTau, actionTau, CNull, RNull, Restrict) return Observer.Observer(MyAgent, MyMap, Method) except Exception as error: print(error)
def crawl():
    # NOTE: `np` here is the module-level NewsParser instance (not numpy);
    # the global declaration is for clarity only, since it is not reassigned.
    global np
    np.crawl()


### MAIN ###
cp = ConfigParser.ConfigParser()
#genConfigFile(cp)
readConfigFile(cp)
seedsList = getConfigSourcesList(cp)
blackList = getConfigWordsBlackList(cp)
eng = Engine()
eng.setWordBlackList(blackList)
# Parser feeds the engine; the observer reports on the engine's state.
np = NewsParser(seedsList, engine=eng)
obs = Observer(eng)

if __name__ == "__main__":
    restore()
    crawl()
    obs.notify()
    save()
    print("\n\n KeyWords : ")
    eng.printKeyWords(5)
    #eng.listWords()
    print("\n")
class Nabla(object):
    """Plane-of-sky gradient operator for Observable maps.

    Computes scalar and angular (Stokes Q/U) gradient maps at a scale set by
    the beam width, with periodic (toroidal) boundary wrapping, and writes
    results through an Observer-based writer.
    """

    # Constructor for the Nabla class.
    def __init__(self, args):
        """args = [beam (FWHM, may be None), N (grid size), boxlen]."""
        self.beam = args[0]
        # Treat a missing beam as zero width (no smoothing).
        if self.beam is None:
            self.beam = 0.0
        self.N = args[1]
        self.boxlen = args[2]
        # Convert FWHM to Gaussian sigma.
        self.sigma = self.beam / (sqrt(8.0 * log(2)))
        # Beam width in pixels; at least one pixel across.
        self.pixelwidth = max(int(self.N * self.sigma / (self.boxlen)), 1)
        self.scale = self.__CalculateScales()
        self.path = './'
        self.Writer = Observer([None, self.N, self.boxlen, './'])

    # Mutator to change the optional label for saving.
    def ChangeOptLabel(self, new_optlabel):
        # BUG FIX: the original referenced the undefined global `Writer`
        # (NameError at call time); the writer lives on the instance.
        self.Writer.ChangeOptLabel(new_optlabel)
        return

    # Mutator to change the path to save.
    def ChangePath(self, new_path):
        # BUG FIX: same undefined-global fix as ChangeOptLabel.
        self.Writer.ChangePath(new_path)
        return

    def __CalculateScales(self):
        """Return [dx, dy] pixel offsets forming a ring of radius pixelwidth.

        These offsets tell the gradient methods where the finite-difference
        neighbors of each pixel lie, so gradients can be computed at
        different scales on the POS.
        """
        scales = []
        l = self.pixelwidth
        for a in range(-l, l + 1):
            for b in range(-l, l + 1):
                r = np.sqrt(a**2 + b**2)
                # Keep offsets within half a pixel of the target radius.
                if (r <= l + 0.5) and (r >= l - 0.5):
                    scales.append([a, b])
        return scales

    def __DQU(self, Qc, Qi, Uc, Ui):
        """Angle difference (degrees) between two POS angles via Stokes Q/U.

        The inferred POS angle is degenerate with the angle pi/2 out of
        phase, making the maximum difference 90 degrees; the difference is
        computed directly from the Stokes parameters.
        """
        return np.rad2deg(0.5 * np.arctan2((Qi * Uc - Qc * Ui),
                                           (Qi * Qc + Ui * Uc)))

    def AngleGradient(self, Q, U, mask):
        """Angular dispersion map from Stokes Q/U, skipping masked pixels.

        Neighbors wrap periodically; masked neighbors are excluded from the
        average (the mean uses only the contributing neighbors).
        """
        q = np.array(Q.data)
        u = np.array(U.data)
        xlim = q.shape[0]
        ylim = q.shape[1]
        S2 = np.zeros((xlim, ylim))
        for i in range(xlim):
            for j in range(ylim):
                num = 0
                if not mask[i, j]:
                    for a, b in self.scale:
                        # Periodic wrap: replaces the original explicit
                        # branch ladder (add/subtract one period); modulo is
                        # equivalent for |offset| < dimension and robust
                        # beyond that.
                        ip = (i + a) % xlim
                        jp = (j + b) % ylim
                        if not mask[ip, jp]:
                            S2[i, j] += self.__DQU(q[i, j], q[ip, jp],
                                                   u[i, j], u[ip, jp])**2
                            num += 1
                    if num != 0:
                        S2[i, j] = S2[i, j] / num
        return np.ma.masked_array(np.sqrt(S2), mask)

    def Gradient(self, O, mask):
        """Ordinary scalar gradient map via squared finite differences.

        Unlike AngleGradient, every neighbor contributes regardless of its
        mask state (matching the original behavior); only the central pixel's
        mask is honored.
        """
        v = O.data
        xlim = v.shape[0]
        ylim = v.shape[1]
        G2 = np.zeros((xlim, ylim))
        for i in range(xlim):
            for j in range(ylim):
                num = 0
                if not mask[i, j]:
                    for a, b in self.scale:
                        # Same periodic wrap as AngleGradient.
                        ip = (i + a) % xlim
                        jp = (j + b) % ylim
                        G2[i, j] += (v[ip, jp] - v[i, j])**2
                        num += 1
                    G2[i, j] = G2[i, j] / num
        return np.ma.masked_array(np.sqrt(G2), mask)

    def ComputeAngleGradient(self, Q, U, mask):
        """Compute the angle gradient, wrap it in an Observable, write, return."""
        sdata = self.AngleGradient(Q, U, mask)
        sargs = [sdata, self.N, 'log', 'Dispersion in Polarization Angles',
                 '$S$', 'Degrees', 'gist_heat', Q.axes, Q.rotation]
        # Beam marker: None means "no beam circle" for single-pixel beams.
        sargs.append(None if self.pixelwidth == 1 else self.pixelwidth)
        S = Observable(sargs)
        self.Writer.WriteObservable(S)
        return S

    def ComputeGradient(self, O, mask):
        """Compute a scalar gradient, wrap it in an Observable, write, return."""
        gdata = self.Gradient(O, mask)
        # Normalize by the physical differencing scale.
        gdata = gdata / max(self.pixelwidth, (1.0 / self.N))
        if O.units is not None:
            gunits = O.units + ' pc$^{-1}$'
        else:
            gunits = 'pc$^{-1}$'
        gargs = [gdata, self.N, 'log', O.lname + ' POS Gradient',
                 'D' + O.sname, gunits, O.colmap, O.axes, O.rotation]
        gargs.append(None if self.pixelwidth == 1 else self.pixelwidth)
        G = Observable(gargs)
        self.Writer.WriteObservable(G)
        return G
# Sort newly downloaded files into subfolders by extension.
# python downloads-watchdog.py "/your/downloads/folder"
# FIX: `os` was used (os.path, os.makedirs, os.rename) but never imported;
# also repaired statement joins mangled in the original paste.
import os
import sys
import time

from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler

folder_to_monitor = sys.argv[1]

# Extension -> destination subfolder (relative to the file's directory).
file_folder_mapping = {
    '.png': 'images',
    '.jpg': 'images',
    '.jpeg': 'images',
    '.gif': 'images',
    '.pdf': 'pdfs',
    '.mp4': 'videos',
    '.mp3': 'audio',
    '.zip': 'bundles'
}


class DownloadedFileHandler(FileSystemEventHandler):
    """Moves newly created files with a known extension into their folder."""

    def on_created(self, event):
        if any(event.src_path.endswith(x) for x in file_folder_mapping):
            parent = os.path.join(
                os.path.dirname(os.path.abspath(event.src_path)),
                file_folder_mapping.get(f".{event.src_path.split('.')[-1]}"))
            if not os.path.exists(parent):
                os.makedirs(parent)
            os.rename(event.src_path,
                      os.path.join(parent, os.path.basename(event.src_path)))


event_handler = DownloadedFileHandler()
observer = Observer()
observer.schedule(event_handler, folder_to_monitor, recursive=True)
print("Monitoring started")
observer.start()
try:
    # Idle loop; the observer thread does the work until Ctrl-C.
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    observer.stop()
observer.join()
import Observer
import mqtt_devices.IKEA
import mqtt_devices.OSRAM

# Observer: MQTT bridge; output devices publish through observer.publish.
observer = Observer.Observer('192.168.188.20', 'zigbee2mqtt')

# Lights
light_bedroom_top = mqtt_devices.IKEA.TRADFRI_RGB_LED(
    'zigbee2mqtt/BRLightTop', observer.publish)
light_living_room_top = mqtt_devices.IKEA.TRADFRI_RGB_LED(
    'zigbee2mqtt/LRLightTop', observer.publish)
light_living_room_shelf = mqtt_devices.IKEA.TRADFRI_RGB_LED(
    'zigbee2mqtt/LRLightShelf', observer.publish)

# Switches — constructed without a publish callback.
switch_bedroom = mqtt_devices.IKEA.TRADFRI_SWITCH('zigbee2mqtt/BRSwitch')
remote_bedroom = mqtt_devices.IKEA.TRADFRI_REMOTE('zigbee2mqtt/BRRemote')
switch_living_room = mqtt_devices.IKEA.TRADFRI_SWITCH('zigbee2mqtt/LRSwitch')
remote_living_room = mqtt_devices.IKEA.TRADFRI_REMOTE('zigbee2mqtt/LRRemote')

# Plugs
plug_window = mqtt_devices.OSRAM.OSRAM_SMART_PLUG('zigbee2mqtt/LRPlugWindow',
                                                  observer.publish)
plug_shelf = mqtt_devices.OSRAM.OSRAM_SMART_PLUG('zigbee2mqtt/LRPlugShelf',
                                                 observer.publish)
from Belief import *
from Agent import *
from Observer import *

# Experiments 1 and 2
# Two options and neither have a cost.
Costs = [0]
Rewards = [-3, -2, -1, 1, 2, 3]
# Flat prior weights over the (single) cost value and the reward support.
PC = [1]
PR = [1, 1, 1, 1, 1, 1]

# Create observer for naive agent
ObsA = Observer(Belief(Costs, PC, 0), Belief(Rewards, PR, 1),
                Belief(Costs, PC, 0), Belief(Rewards, PR, 1), [0, 0, 0, 0])
# Create observer for knowledgeable agent.
ObsB = Observer(Belief(Costs, PC, 0), Belief(Rewards, PR, 1),
                Belief(Costs, PC, 0), Belief(Rewards, PR, 1), [0, 1, 0, 1])

# Both agents are observed choosing option 0.
[LAgent_Naive, Samples_Naive,
 Probabilities_Naive] = ObsA.ObserveAction(0)
[LAgent_Knowledgeable, Samples_Knowledgeable,
 Probabilities_Knowledgeable] = ObsB.ObserveAction(0)

# Integrate likelihoods with priors.
ObsA.BuildPosterior(LAgent_Naive)
ObsB.BuildPosterior(LAgent_Knowledgeable)

# Experiment 1: What is each agent's probability of getting a low reward?
# Both agents chose option 0 so we just need to get the probability that
# TrueValue for option 0 is negative.
Rationality = 0.01 # Build filenames if CulturalModel: Basefile = 'POMDPs/Drawer' + str(DrawerDimensions[0]) + 'x' + str( DrawerDimensions[1]) + 'space_cultural' Outputfile = 'DrawerPredictions_cultural.csv' else: Basefile = 'POMDPs/Drawer' + str(DrawerDimensions[0]) + 'x' + str( DrawerDimensions[1]) + 'space_rational' Outputfile = 'DrawerPredictions_rational.csv' WorldModel = Basefile + '.POMDP' AgentModel = Basefile + '.policy' OpenDrawers = [] sys.stdout.write("Loading model and policy...\n") Observer = Observer.Observer(WorldModel, AgentModel, OpenDrawers, DrawerDimensions) Observer.load() # Part 2: LOAD EACH TRIAL AND RUN ################################# WriteHeader = True with open('DrawerInputData_Exp.csv') as csv_file: csv_reader = csv.reader(csv_file, delimiter=",") next(csv_reader) # skip header for trial in csv_reader: TrialName = trial[0] sys.stdout.write("Running trial: " + TrialName + "...\n") Observer.OpenDrawers = trial[1].split(' ') DrawerColors = trial[2] if DrawerColors == "": Observer.DrawerColors = np.zeros(