def initScheduler(self):
    """Instantiate the Scheduler used by this component.

    Follows the same pattern as initTobiiEyeTracker: a ThreadedScheduler
    would wrap this Scheduler in its own thread the way ThreadedSensor
    does for tobii.
    """
    # NOTE(review): only the plain Scheduler is built here; the threaded
    # variant mentioned above is created elsewhere — confirm.
    self.scheduler = Scheduler.Scheduler()
async def create_components(self, name, start_time, runtime, broker_address, protocol, settings):
    """Initialize client state, warning logging, the scheduler and the
    network adapter, then connect to the broker.

    Parameters:
        name: prefix for the "<name>warning.log" file.
        start_time, runtime: forwarded to the Scheduler.
        broker_address: address the adapter connects to.
        protocol: selects the adapter type via set_adapter().
        settings: extra connect options, used only for MQTT adapters.
    """
    self.subscriptions = []
    self.number_of_clients = []
    self.responses = {}
    # Empty out any previous warning log for this name.
    # NOTE(review): open(..., 'w') already truncates; truncate(0) is redundant.
    file = open("{0}warning.log".format(name), 'w')
    file.truncate(0)
    file.close()
    # Dedicated warning logger writing to the per-name file; handlers are
    # cleared first so repeated calls do not duplicate output.
    self.warning_logger = logging.getLogger("WarningLogger")
    self.warning_logger.handlers.clear()
    self.warning_logger.addHandler(
        logging.FileHandler("{0}warning.log".format(name)))
    self.warning_logger.setLevel(logging.INFO)
    self.scheduler = Scheduler.Scheduler(self, start_time, runtime)
    self.scheduler.start_scheduler()
    self.broker_address = broker_address
    self.start_time = start_time
    self.scheduler.schedule_stop()
    self.scheduler.schedule_resource_measuring()
    # Record (but do not fail on) missing clock synchronization.
    if not self.check_ntp_synchronization():
        self.warning_logger.info("Time is not synchronized with ntp")
    if not self.check_ptp_synchronization():
        self.warning_logger.info("Time is not synchronized with ptp")
    self.set_adapter(protocol, name)
    # MQTT adapters take extra settings; other adapters connect bare.
    if isinstance(self.adapter, MqttAdapter.MqttAdapter):
        await self.adapter.connect(broker_address, settings)
    else:
        print(broker_address)
        print(isinstance(self.adapter, CoapAdapter.CoapAdapter))
        await self.adapter.connect(broker_address)
def processRun(self, processNumber): """ The code to run a new worker process """ # Process code signal.signal(signal.SIGTERM, signal.SIG_DFL) signal.signal(signal.SIGHUP, signal.SIG_DFL) self.log = LogManager("%s.%u" % (CONFIG.hostname, processNumber), False) self.ovtDB.reconnect(quiet=True, log=self.log) innerexception = None try: try: scheduler = Scheduler.Scheduler(self, processNumber) scheduler.run() except Exception, e: innerexception = formatExceptionInfo() try: self.ovtDB.FORCEROLLBACK() except DatabaseRetryException, e: self.ovtDB.reconnect() self.ovtDB.setAutoCommit(True) self.logDB(processNumber, innerexception) self.log.write("%s: %s" % (time.asctime(), innerexception)) sys.exit(3)
def schedule(filename):
    """Parse *filename*, rename registers, and print a two-wide schedule.

    Each scheduled slot pairs two instructions, emitted as
    "[<ins1>; <ins2>]" on stdout.
    """
    source = ReadFile(filename)
    ir = IntermediateRepresentation()
    parser = Parser(Scanner(source), ir)
    # Consume the input line by line until the scanner signals EOF.
    while not parser.scanner.stop_flag:
        parser.parse_line()
    records, maxlive, maxVR = renameReg(ir.next, parser.maxSR + 1,
                                        parser.count - 1)
    records = list(reversed(records))
    sched = Scheduler(records)
    sched.compute_priority()
    ins, debug = sched.instruction_schedule()
    ir_collection = sched.IR
    # Slot entries carry 1-based indices into the IR collection.
    for first, second in ins:
        text_a = get_print(first, ir_collection[first[0] - 1].ir)
        text_b = get_print(second, ir_collection[second[0] - 1].ir)
        sys.stdout.write('[' + text_a + '; ' + text_b + ']\n')
def __init__(self, road_control, car_amount):
    """Set up car-generation state for one road.

    The turn probabilities scale linearly with traffic volume:
    20% left, 20% right, 60% straight through, each divided by 14000.
    """
    self.new_car = []
    self.road_control = road_control
    # Scheduler used to time new-car events.
    self.sa = Scheduler.Scheduler()
    # Expression order preserved exactly to keep float results identical.
    self.turn_left = 0.2 * car_amount / 14000
    self.turn_right = 0.2 * car_amount / 14000
    self.through = 0.6 * car_amount / 14000
def run(min_metric, apps, video_desc, mode, budget=350, scheduler="greedy", verbose=False):
    """Run the scheduler end-to-end and collect its performance stats.

    Returns (scheduler_instance, stats) where stats holds error rates,
    cost, accuracy, and the chosen frozen/fps configuration.
    """
    sched = Scheduler.Scheduler(min_metric, apps, video_desc,
                                app_data.model_desc, verbose=verbose,
                                scheduler=scheduler)
    (fnr, fpr, f1, cost, avg_rel_acc,
     num_frozen_list, target_fps_list) = sched.run(budget, mode=mode)
    stats = {
        "fnr": fnr,
        "fpr": fpr,
        "f1": f1,
        "cost": cost,
        "avg_rel_acc": avg_rel_acc,
        "frozen": num_frozen_list,
        "fps": target_fps_list,
    }
    # Reported metric is the complement of the minimized metric.
    stats["metric"] = 1 - stats[min_metric]
    return sched, stats
def __init__(self, node_list, edge_list):
    """Precompute routes and initialize edge weights for the graph."""
    planner = Scheduler.Scheduler(node_list, edge_list)
    self.route_matrix = planner.calc_routes()
    # Give every edge its default weight before any lookups happen.
    for edge in edge_list:
        edge.init_weights()
    self.edge_dict = Edge.Edge.edges_list_to_dict(edge_list)
def run_simulator(min_metric, apps, video_desc, budget=350, mode="mainstream", dp=None, **kwargs):
    """Optimize app parameters in simulation and report observed stats.

    Returns (scheduler_instance, stats) with the optimized metric,
    relative accuracies, observed error rates/cost, and the chosen
    frozen/fps configuration.

    Fix: the original used a mutable default argument (dp={}) which is
    shared across calls; a fresh dict is now created per call. Passing
    dp explicitly behaves exactly as before.
    """
    if dp is None:
        dp = {}
    s = Scheduler.Scheduler(min_metric, apps, video_desc,
                            app_data.model_desc, **kwargs)
    stats = {
        "metric": s.optimize_parameters(budget, mode=mode, dp=dp),
        "rel_accs": s.get_relative_accuracies(),
    }
    # Score the streamer schedule against target_fps_list in the
    # simulator, so nothing has to run on real hardware.
    sched = s.make_streamer_schedule()
    stats["fnr"], stats["fpr"], stats["f1"], stats[
        "cost"] = s.get_observed_performance(sched, s.target_fps_list)
    stats["fps"] = s.target_fps_list
    stats["frozen"] = s.num_frozen_list
    stats["avg_rel_acc"] = np.average(stats["rel_accs"])
    return s, stats
def start(self, updatePricesCallback, updatePositionsCallback):
    """Store the UI callbacks and launch the half-second polling scheduler."""
    self.updatePricesCallback = updatePricesCallback
    self.updatePositionsCallback = updatePositionsCallback
    # Single-threaded scheduler firing scheduleWorker every 0.5 s.
    self.scheduler = Scheduler.Scheduler(0.5, self.scheduleWorker,
                                         multi_thread=False)
    self.scheduler.start()
def run_simulator(min_metric, apps, video_desc, budget=350, scheduler="greedy", verbose=False):
    """Simulate a schedule for *apps* and return (scheduler, stats)."""
    sim = Scheduler.Scheduler(min_metric, apps, video_desc,
                              app_data.model_desc, 0,
                              verbose=verbose, scheduler=scheduler)
    stats = {}
    stats["metric"] = sim.optimize_parameters(budget)
    stats["rel_accs"] = sim.get_relative_accuracies()
    # Build the streamer schedule, then score it offline via
    # target_fps_list instead of running on the hardware.
    plan = sim.make_streamer_schedule()
    (stats["fnr"], stats["fpr"], stats["f1"],
     stats["cost"]) = sim.get_observed_performance(plan, sim.target_fps_list)
    stats["fps"] = sim.target_fps_list
    stats["frozen"] = sim.num_frozen_list
    stats["avg_rel_acc"] = np.average(stats["rel_accs"])
    return sim, stats
def _run(self):
    """Start the scheduler, then print a heartbeat timestamp every 61 s.

    Fix: converted the Python 2 print statements to print() calls for
    consistency with the Python 3 code elsewhere in this file; output is
    unchanged.
    """
    scheduler = Scheduler.Scheduler()
    scheduler.start()
    while True:
        print(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
        time.sleep(61)
def __init__(self, logger):
    """Wire up the simulated machine: logger, hardware, then kernel."""
    self._logger = logger
    # Hardware layer of the simulated OS.
    self._hardware = Hardware(self._logger)
    # The kernel receives the hardware plus a fresh Scheduler.
    self._kernel = KernelSO(self._logger, self._hardware, Scheduler())
def init_objects(self):
    """Create and activate the simulation scheduler (may run only once).

    Raises:
        Exception: if called after objects were already initiated.
    """
    # Guard clause replaces the original if/else; behavior is identical.
    if self.initiated:
        raise Exception("Objects already initiated")
    self.initiated = True
    # objects
    self.scheduler = Scheduler(self)
    # Hand the scheduler's run() generator to the simulation kernel.
    activate(self.scheduler, self.scheduler.run())
def main():
    """Load the job file named on the command line and run the batch.

    argv[1] is the input file, argv[2] the per-step sleep in seconds.
    Any failure is reported on stdout rather than crashing.
    """
    job_file = sys.argv[1]
    pause_seconds = int(sys.argv[2])
    batch = Scheduler()
    try:
        batch.load_jobs(job_file)
        start(batch, pause_seconds)
    except Exception as e:
        print(e)
def test_formAndEvolvePartnerships(self):
    """Use the previously built population: evolve it and check that
    partnerships form, infections seed, and transmissions occur.

    NOTE(review): Python 2 code (print statements, xrange). It also mixes
    the module-level 'params' with 'self.params' — confirm which is
    intended before changing either.
    """
    schedule = Scheduler(params=self.params)
    for m in self.males:
        schedule.register_person(m)
    for f in self.females:
        schedule.register_person(f)
    print "forming partnerships"
    # Burn-in phase: let partnerships form before any disease is seeded.
    for day in xrange(params['nBurnDays']):
        schedule.coresim(males=self.males, females=self.females, day=day,
                         params=params)
    print "n_partnerships:%d" % schedule.n_partnerships
    self.assertTrue(schedule.n_partnerships > 1)
    print "seeding infections"
    diseases = seed_infections(self.males, self.females, params['nBurnDays'],
                               schedule=schedule, params=self.params)
    initial_f_infections = sum(f.is_infected for f in self.females)
    initial_m_infections = sum(m.is_infected for m in self.males)
    print "evolving partnerships"
    # Track the maximum transmission count observed across the second phase.
    n_transmissions_max = 0
    for day in xrange(params['nBurnDays'], 2 * params['nBurnDays']):
        schedule.coresim(males=self.males, females=self.females, day=day,
                         params=self.params)
        n_transmissions_max = max(schedule.count_transmissions(),
                                  n_transmissions_max)
    self.nTransmissions = n_transmissions_max
    self.assertTrue(n_transmissions_max > 1)
    final_f_infections = sum(f.is_infected for f in self.females)
    final_m_infections = sum(m.is_infected for m in self.males)
    self.assertTrue(sum(f.is_infected for f in self.females) > 0)
    self.assertTrue(sum(m.is_infected for m in self.males) > 0)
    print "initial infections:%d,%d" % (initial_f_infections, initial_m_infections)
    print "final infections:%d,%d" % (final_f_infections, final_m_infections)
    # not strictly true, but highly unlikely
    # cannot guarantee final will be higher than initial
    # nor the reverse
    self.assertTrue(initial_f_infections != final_f_infections)
    self.assertTrue(initial_m_infections != final_m_infections)
def write_intermediate_files(args, setup, setup_suffix):
    """Write the cost/benefit and model files consumed by the C++ solver.

    Fixes: Python 2 print statement converted to a print() call
    (consistent with the Python 3 code in this file); the unused return
    values f1/f2 are no longer bound; the redundant bare return dropped.
    """
    print("Writing intermediate I/O file.")
    apps = [app.to_map() for app in setup.apps]
    s = Scheduler.Scheduler(args.metric, apps, setup.video_desc.to_map(),
                            app_data.model_desc)
    # Cost benefits, model, and environment data for cpp
    cost_benefits = s.get_cost_benefits()
    write_cost_benefits_file(cost_benefits, args.outdir, setup_suffix)
    write_model_file(s.model.layer_latencies, args.outdir, setup_suffix)
def __init__(self, runNum):
    """Singleton stats collector: opens the schedule CSV and registers
    engine callbacks for run start, end-of-cycle and end of run.

    Parameters:
        runNum: index of this simulation run; run 0 writes the header.
    """
    #===== start singleton
    if self._init:
        return
    self._init = True
    #===== end singleton
    # store params
    self.runNum = runNum
    # local variables
    self.engine = SimEngine.SimEngine()
    self.settings = SimSettings.SimSettings()
    self.schedules = Scheduler.Scheduler()
    # NOTE(review): the handle stays open for the object's lifetime and is
    # presumably closed at _actionEnd — confirm.
    self.Schedulefilename = "../bin/simData/schedulefile.csv"
    self.scheduleoutputfile = open(self.Schedulefilename, "w")
    # stats
    self.stats = {}
    self.columnNames = []
    self.datafilename = []
    # start file: only the first run writes the column header
    if self.runNum == 0:
        self._fileWriteHeader()
    # schedule actions
    self.engine.scheduleAtStart(
        cb = self._actionStart,
    )
    # Fire one slot before the end of the current slotframe.
    self.engine.scheduleAtAsn(
        asn = self.engine.getAsn()+self.settings.slotframeLength-1,
        cb = self._actionEndCycle,
        uniqueTag = (None,'_actionEndCycle'),
        priority = 10,
    )
    self.engine.scheduleAtEnd(
        cb = self._actionEnd,
    )
def __init__(self, runNum=None, failIfNotInit=False):
    """Singleton simulation-engine thread: builds motes, topology and
    scheduler, boots every mote, and prepares the event loop state.

    Parameters:
        runNum: index of this simulation run.
        failIfNotInit: raise instead of initializing when the singleton
            has not been set up yet.

    Raises:
        EnvironmentError: if failIfNotInit is set and the singleton is
            uninitialized.
    """
    if failIfNotInit and not self._init:
        raise EnvironmentError('SimEngine singleton not initialized.')
    #===== start singleton
    if self._init:
        return
    self._init = True
    #===== end singleton
    # store params
    self.runNum = runNum
    # local variables
    self.dataLock = threading.RLock()
    # Semaphore starts at 0 so a pause blocks until explicitly released.
    self.pauseSem = threading.Semaphore(0)
    self.simPaused = False
    self.goOn = True
    # asn: slot counter (presumably "absolute slot number" — confirm).
    self.asn = 0
    self.startCb = []
    self.endCb = []
    self.events = []
    self.settings = SimSettings.SimSettings()
    self.propagation = Propagation.Propagation()
    # NOTE(review): the comprehension variable shadows the builtin id().
    self.motes = [Mote.Mote(id) for id in range(self.settings.numMotes)]
    self.topology = Topology.Topology(self.motes)
    self.scheduler = Scheduler.Scheduler()
    #self.LinkSchedules = LinkSchedules.LinkSchedules(self.motes)
    self.topology.createTopology()
    # run Scheduler
    self.scheduler.updateMoteSchedules()
    # boot all motes
    for i in range(len(self.motes)):
        self.motes[i].boot()
    # initialize parent class
    threading.Thread.__init__(self)
    self.name = 'SimEngine'
def run_scheduler(metric, setup, setup_suffix, scheduler_type, is_simulator):
    """Run one scheduler configuration (simulated or on the streamer) and
    return the evaluation row from get_eval().

    Fix: Python 2 print statements converted to print() calls, matching
    the Python 3 code elsewhere in this file; output text is unchanged.
    """
    apps = [app.to_map() for app in setup.apps]
    budget = setup.budget
    # NOTE(review): this instance is immediately replaced by the one
    # returned from run_simulator()/run() below; kept in case the
    # constructor has side effects — confirm and remove if it has none.
    s = Scheduler.Scheduler(metric, apps, setup.video_desc.to_map(),
                            app_data.model_desc, 0, scheduler=scheduler_type)
    # Run mainstream
    start = datetime.datetime.now()
    if (is_simulator):
        print("Running " + scheduler_type + " simulator.")
        s, stats = sim.run_simulator(metric, apps, setup.video_desc.to_map(),
                                     budget, scheduler=scheduler_type)
    else:
        print("Running " + scheduler_type + " with streamer.")
        if scheduler_type == "greedy" or scheduler_type == "hifi":
            sharing_type = "mainstream"
        else:
            sharing_type = scheduler_type
        s, stats = sched.run(metric, apps, app_data.video_desc, sharing_type,
                             budget=budget)
    end = datetime.datetime.now()
    diff = end - start
    # NOTE(review): diff.microseconds is only the sub-second component of
    # the elapsed time; diff.total_seconds() may be what was intended —
    # confirm against get_eval() before changing.
    row = get_eval(len(apps), s, stats, budget, diff.microseconds)
    return row
def test_optimize_parameters():
    """optimize_parameters() should reach the known-best F1 metric 0.129.

    Fix: Python 2 print statement converted to a print() call, matching
    the Python 3 code elsewhere in this file.
    """
    three_apps = apps[:3]  # Decrease to three apps so we can brute force
    s = Scheduler.Scheduler("f1", three_apps, video_desc, model_desc)
    # Reference values can be regenerated with s.get_parameter_options():
    '''
    schedules, metrics, costs = s.get_parameter_options()
    for sched, m, c in zip(schedules, metrics, costs):
        print "----------------------------"
        print "1- F1:", m, ",", "Cost:", c
        for unit in sched:
            print unit.app_id, ":", unit.target_fps, ",", unit.num_frozen
    '''
    # Heuristic does not achieve highest possible F1.
    # Best case metric: 0.129 _should_ be achievable with cost: 242
    metric = round(s.optimize_parameters(400), 4)
    print(metric)
    assert metric == 0.129
def schedule(filename):
    """Parse *filename*, rename registers, and print a two-wide schedule.

    Each scheduled slot pairs two instructions, emitted as
    "[<ins1>; <ins2>]" on stdout.

    Fix: removed the large commented-out debug printing block (dead
    code); the live logic is unchanged.
    """
    f = ReadFile(filename)
    scanner = Scanner(f)
    ir = IntermediateRepresentation()
    parser = Parser(scanner, ir)
    # Consume the input line by line until the scanner signals EOF.
    while True:
        if parser.scanner.stop_flag:
            break
        parser.parse_line()
    records, maxlive, maxVR = renameReg(ir.next, parser.maxSR + 1,
                                        parser.count - 1)
    records = list(reversed(records))
    scheduler = Scheduler(records)
    scheduler.compute_priority()
    ins, debug = scheduler.instruction_schedule()
    ir_collection = scheduler.IR
    # Slot entries carry 1-based indices into the IR collection.
    for s1, s2 in ins:
        idx1 = s1[0] - 1
        idx2 = s2[0] - 1
        out1 = get_print(s1, ir_collection[idx1].ir)
        out2 = get_print(s2, ir_collection[idx2].ir)
        sys.stdout.write('[' + out1 + '; ' + out2 + ']\n')
def symulate(self):
    """Open the pygame window scaled to the map, run the scheduler-driven
    simulation, then show the statistics screen."""
    self.close()
    pygame.init()
    pygame.font.init()
    self.font_path = pygame.font.match_font('avantgarde md bt')
    self.MAP_SIZE_H = 700
    self.MAP_SIZE_W = 1000
    self.MENU_W = 400
    # Pick the scale factor that fits the map into the window; arithmetic
    # kept exactly as-is so the computed scale is unchanged.
    if self.map_high > self.map_width or (
            self.MAP_SIZE_W / self.map_width) * self.map_high > self.MAP_SIZE_H:
        self.scaled = (float)(self.MAP_SIZE_H / self.map_high)
    else:
        self.scaled = (float)(self.MAP_SIZE_W / self.map_width)
    window_w = (int)(self.map_width * self.scaled + self.MENU_W)
    window_h = (int)(self.map_high * self.scaled)
    self.area = pygame.display.set_mode((window_w, window_h), 0, 32)
    pygame.display.set_caption('Symulacja')
    runner = Scheduler.Scheduler(self.sensor_list, self.target_list,
                                 self.range_of_sensor, self)
    runner.run()
    self.duration = runner.duration
    self.statistics()
def RescheduleDBUpdate(self, schedule_label):
    """Reschedule a failed database update to retry after the configured
    delay, pausing the current schedule entry if one exists.

    Fixes: Python 2 print statement converted to a print() call (same
    output text); counter increment written with +=.
    """
    current_sched = self._FindSchedule(schedule_label)
    # reschedule database update in 5 minutes
    # Use the C locale so strftime output is stable.
    locale.setlocale(locale.LC_ALL, 'C')
    start_time = time.localtime(time.time() + self._reschedule_delay)
    # Convert Sunday-based %w (0..6) into a Monday-based weekday index.
    weekday = int(time.strftime('%w', start_time))
    if weekday:
        weekday -= 1
    else:
        weekday = 6
    print('rescheduling db update attempt %i at %s' % (
        self._dbupdateattempts, time.strftime('%H:%M:%S', start_time)))
    # NOTE(review): 'ClamWin_Upadte_Time' (sic) is preserved byte-for-byte;
    # it may match an existing stored key name.
    rescheduled = Scheduler.Scheduler(
        'Once', time.strftime('%H:%M:%S', start_time), weekday, False,
        win32gui.SendMessage,
        (self.hwnd, win32con.WM_COMMAND, self.MENU_UPDATE_DB, schedule_label),
        ('ClamWin_Scheduler_Info', 'ClamWin_Upadte_Time'))
    if current_sched is not None:
        current_sched.pause()
    rescheduled.start()
    self._schedulers.append(rescheduled)
    self._dbupdateattempts += 1
# ------------------------------------------ # # INITIALIZE OUR REWRITE RULES # # ------------------------------------------ # story_rewrite_rules.append(Ambush_Rule) story_rewrite_rules.append(Caught_Rule) story_rewrite_rules.append(StKill_Rule) # ------------------------------------------ # # GENERATE A NARRATIVE # # ------------------------------------------ # main_graph.plot('output_initial') #~ command = "dot -Tpng output_initial.dot -o Initial_Graph.png" #~ os.system(command) sched = Scheduler(main_graph, story_initialization_rules, story_rewrite_rules, True, None) main_graph.generate_preconditions() initial_preconditions = main_graph.get_preconditions() #~ schedulers = [] schedulers.append(sched) final_graphs = [] failed_to_find_stories = [] stories = [] num_stories = 0 num_scheds = 0 num_iters = 0 run = True
def test_make_streamer_schedule():
    """make_streamer_schedule() must emit exactly this net list for the
    given num_frozen/target_fps settings: private nets for unshared work
    and shared trunk nets (app_id == -1) feeding dependent apps."""
    ref_schedule = \
        [
            # App 4: nothing frozen, so it gets a fully private net.
            {"net_id": 0, "app_id": 4, "parent_id": -1,
             "input_layer": "input", "output_layer": "softmax",
             "channels": 3, "height": 299, "width": 299,
             "target_fps": 8, "shared": False,
             "model_path": "app4_model.pb"},
            # Shared trunk up to conv1 (app_id -1 marks a shared net).
            {"net_id": 1, "app_id": -1, "parent_id": -1,
             "input_layer": "input", "output_layer": "conv1",
             "channels": 3, "height": 299, "width": 299,
             "target_fps": 8, "shared": True,
             "model_path": "app1_model.pb"},
            # App 1's private tail from conv1 onward.
            {"net_id": 2, "app_id": 1, "parent_id": 1,
             "input_layer": "conv1", "output_layer": "softmax",
             "channels": 3, "height": 299, "width": 299,
             "target_fps": 2, "shared": False,
             "model_path": "app1_model.pb"},
            # Second shared segment conv1 -> pool.
            {"net_id": 3, "app_id": -1, "parent_id": 1,
             "input_layer": "conv1", "output_layer": "pool",
             "channels": 3, "height": 299, "width": 299,
             "target_fps": 8, "shared": True,
             "model_path": "app2_model.pb"},
            # App 2's private tail from pool onward.
            {"net_id": 4, "app_id": 2, "parent_id": 3,
             "input_layer": "pool", "output_layer": "softmax",
             "channels": 3, "height": 299, "width": 299,
             "target_fps": 4, "shared": False,
             "model_path": "app2_model.pb"},
            # App 3's private tail from pool onward.
            {"net_id": 5, "app_id": 3, "parent_id": 3,
             "input_layer": "pool", "output_layer": "softmax",
             "channels": 3, "height": 299, "width": 299,
             "target_fps": 8, "shared": False,
             "model_path": "app3_model.pb"}
        ]
    s = Scheduler.Scheduler("fnr", apps, video_desc, model_desc)
    # Inject the per-app configuration the schedule is derived from.
    s.num_frozen_list = [10, 30, 40, 0]
    s.target_fps_list = [2, 4, 8, 8]
    schedule = s.make_streamer_schedule()
    assert ref_schedule == schedule
def run(self):
    """Client receive/send loop (Python 2 code).

    Waits on select() for the socket, reads and deserializes incoming
    data under the lock, sends queued outbox data when writable, and
    shuts the connection down on EOF, death or any exception.

    NOTE(review): 'writers' is only assigned when self.alive() is true
    (the else-branch is commented out) — it would be unbound on the
    first pass if the proxy is already dead; confirm. The 'worker'
    Scheduler below is never used in this method.
    """
    cur_len_in_s = ''
    cur_len_in = 0
    len_left = LEN_LEN
    start = 0
    read_len = 0
    rec = ''
    worker = Scheduler(block=True, receive=False)
    try:
        while(self.__open):
            sleep_needed = False
            if self.alive():
                # sends done from main loop
                writers = []
            #else:
                # not longer in main loop
                #writers=[self.__s]
                #print 'proxy is dead. add writer to select'
            reads, writes, errs = select.select([self.__s], writers, [], 1.5)
            if self.acquireLock(True):
                try:
                    if self.__s in reads:
                        #if self.acquireLock(True):
                        read_now = self.__s.recv(4096)
                        self.bytes_read += len(read_now)
                        if read_now == '':
                            # Empty read means the peer closed the socket.
                            self.markDead()
                            self.releaseLock()
                            self.__open = False
                            break
                        #print 'Client.run: len read: '+str(len(rec))
                        # Accumulate and decode frames; deserialize remotes.
                        rec = read(self.__s, rec + read_now,
                                   self.addSerialisables, lambda sock: None)
                        self.factory.deserRemotes(self.__sers)
                        self.__sers = {}
                        if self.getId() in self.factory and not self.getObj(self.getId()).alive():
                            print 'Client.run. closing socket'
                            self.releaseLock()
                            self.__open = False
                            break
                        #self.releaseLock()
                    #else:
                    #    sleep_needed=True
                    if self.__s in writes:
                        if not self.alive():
                            print 'after select. proxy is dead and socket is writeable'
                        #if self.acquireLock():
                        if not self.alive():
                            print 'after select. proxy is dead and socket is writeable and we have the lock'
                        if len(self.__outbox) > 0:
                            if not self.alive():
                                print 'after select. proxy is dead and socket is writeable and we have the lock and we have something to send'
                            self.send()
                        else:
                            sleep_needed = True
                        #self.releaseLock()
                    #else:
                    #    sleep_needed=True
                finally:
                    # Always release, even if a handler above raised.
                    self.releaseLock()
            if sleep_needed:
                # Yield the CPU without a real delay.
                sleep(0)
    except:
        if self.alive():
            print 'exception in client'
        self.markDead()
        print_exc()
    if self.__open:
        self.quit()
    print 'Exiting Client.run'
# NOTE(review): the two yields below are the tail of the server()
# coroutine whose "def" lies before this chunk; indentation is
# reconstructed — confirm against the full file.
    yield client.send(("*** Less than 10 samples available").encode('latin-1'))
    yield client.close()
#________________________________________________
# Rolling text buffers holding the latest formatted sensor readings.
timeStr = ""
ambTempStr = ""
outTempStr = ""
mainRadTempStr = ""
libRadTempStr = ""
radSupTempStr = ""
h2oInTempStr = ""
h2oOutTempStr = ""
numTempSamples = 0
# Cooperative scheduler driving the TCP server coroutine on port 8888.
scheduler = Scheduler()
scheduler.new(server(8888))
# DS2482 1-wire bridge at I2C address 0x18 on bus 2.
temperatureController = DS2482.DS2482(address=0x18, busnum=2)
#temperatureController = DS2482.DS2482()
result = temperatureController.DS2482_reset()
if result:
    pass
else:
    # Python 2 print statement (this chunk predates py3 conversion).
    print "*** DS2482_reset returned False"
# Select Active PullUp - required when >1 sensor connected to bus
temperatureController.DS2482_writeConfiguration(0x01)
print "=================================================="
# NOTE(review): this chunk appears to start mid-loop — 's' is an
# expression string from an enclosing loop defined before this chunk;
# confirm against the full file.
t = 95.7
# Substitute a concrete exec_delay value into the expression under test.
replace_map = {'exec_delay': repr(t)}
price = scheduler.EvalPrice(s, replace_map)
# EvalPrice signals a parse failure with a negative result.
if price >= 0:
    print("string '", s, "' gave price of", price,
          'with exec_delay', t)
else:
    print("string '", s, "' failed to parse with exec_delay", t)
# unsafe_val = eval(s)
# print "unsafe eval of string '",s,"' gave result",unsafe_val

# Main
# NOTE(review): 86400 is presumably a period in seconds (one day) — confirm.
scheduler = Scheduler.Scheduler(86400)
# Expressions exercising the price parser: plain numbers, arithmetic,
# exec_delay substitution, math calls, booleans — plus deliberately
# invalid inputs ('[1,2,1]', 'abc') to test failure handling.
strings = [
    '5',
    '4.5',
    '4+7',
    '4.43*3/2-4.4',
    '25 % 7',
    '102.5-exec_delay',
    '2*(201-exec_delay)**2',
    'math.cos(2)',
    'True and 4',
    '(exec_delay < 100 and 1000) or 10',
    '[1,2,1]',
    'abc',
]
def test_Scheduler_isnt_empty_after_add():
    """Adding one (action, when) pair leaves exactly one entry queued."""
    queue = Scheduler.Scheduler()
    fire_at = datetime.now() + timedelta(seconds=1)
    queue.add((Pump, fire_at))
    assert len(queue.Schedule) == 1
def test_Scheduler_is_empty_at_start():
    """A freshly constructed Scheduler holds no scheduled entries."""
    fresh = Scheduler.Scheduler()
    assert len(fresh.Schedule) == 0