Code example #1
def main():
    for i in range(p.Runs):
        clock = 0  # set clock to 0 at the start of the simulation
        if p.hasTrans:
            if p.Ttechnique == "Light":
                Transaction.create_transactions_light()  # generate pending transactions
            elif p.Ttechnique == "Full":
                Transaction.create_transactions_full()  # generate pending transactions

        Node.generate_gensis_block()  # generate the genesis block for all miners
        Scheduler.initial_events()  # initiate initial events to start with

        while not Queue.isEmpty() and clock <= p.simTime:
            next_event = Queue.get_next_event()
            clock = next_event.time  # move clock to the time of the event
            Event.run_event(next_event)
            Queue.remove_event(next_event)

        Consensus.freshness_preferred()  # resolve any forks at the end of the run
        Results.calculate()  # calculate the simulation results (e.g., block statistics and miners' rewards)

        ########## reset all global variables before the next run #############
        Results.reset()  # reset all variables used to calculate the results
        Node.resetState()  # reset all the states (blockchains) for all nodes in the network

    print("Percentage of blocks mined by the attacker: ",
          (p.attacker_blocks / float(p.Runs)))
Code example #2
    def init_individual(self, ind_class, size):
        temp_jobs_list = copy.deepcopy(self.__jobs)
        temp_machines_list = copy.deepcopy(self.__machines)

        # Run the scheduler, passing the RGV configuration
        s = Scheduler(temp_machines_list, 1, temp_jobs_list, self.__rgv_config)
        s.run(Heuristics.random_operation_choice, verbose=True)

        # Gather all job objects and their completed operations
        list_activities = []
        for temp_job in temp_jobs_list:
            for temp_activity in temp_job.activities_done:
                activity = self.__jobs[temp_activity.id_job - 1].get_activity(
                    temp_activity.id_activity)
                operation = activity.get_operation(
                    temp_activity.operation_done.id_operation)
                list_activities.append(
                    (temp_activity.operation_done.time, activity, operation))
        # print(str(list_activities))
        # Sort the activities by time
        list_activities = sorted(list_activities, key=lambda x: x[0])
        individual = [(activity, operation)
                      for (_, activity, operation) in list_activities]
        del temp_jobs_list, temp_machines_list
        return ind_class(individual)
Code example #3
    def setUp(self):
        self.pcb1 = PCB(3, 5, 20, 40, 4)
        self.pcb2 = PCB(8, 10, 25, 42, 3)
        self.pcb3 = PCB(20, 30, 15, 45, 5)
        self.pcb4 = PCB(46, 49, 5, 6, 1)

        self.scheduler = Scheduler()
Code example #4
File: SchedulerLocal.py Project: belforte/CRAB2
    def configure(self, cfg_params):
        self.environment_unique_identifier = None
        self.cfg_params = cfg_params
        Scheduler.configure(self,cfg_params)
        self.jobtypeName = cfg_params['CRAB.jobtype']

        name=string.upper(self.name())
        self.queue = cfg_params.get(name+'.queue',None)

        self.res = cfg_params.get(name+'.resource',None)

        # Minimal padding time for jobs; disabled for local schedulers.
        # Added only for alignment (and testing) with Grid schedulers.
        self.minimal_job_duration = 0

        if (cfg_params.has_key(self.name()+'.env_id')): self.environment_unique_identifier = cfg_params[self.name()+'.env_id']
        ## is this ok?
        localDomainName = getLocalDomain(self)
        localPSNs = getListOfPSNsForThisDomain(localDomainName)
        # turn list to a string as in crab.cfg
        localPSNs = ",".join(localPSNs)
        if not cfg_params.has_key('GRID.se_white_list'):
            cfg_params['GRID.se_white_list']=localPSNs
            common.logger.info("Your domain name is "+str(localDomainName)+": only local dataset will be considered")
        else:
            common.logger.info("Your se_white_list is set to "+str(cfg_params['GRID.se_white_list'])+": only local dataset will be considered")
        return
Code example #5
    def __init__(self):
        '''
		Args:
			--scheduler: type Scheduler, takes in the scheduler 
			--task_list: type list of Task, takes in the tasks
			--partition_list, type dict of partitions, the index of a partition is its id
			--sch_util, type float, total utilization of tasks scheduled successfully
		'''
        self._scheduler = Scheduler('best_fit')
        self._task_list = []
        self._partition_list = []
        #print type(self._partition_list)
        self._is_schedulable = True
        self._total_util = 0
        self._sch_util = 0
        self._total_num = 0
        self._sch_num = 0
        self._total_val = 0
        self._sch_val = 0
        self._state_now = []

        self._critical_time = []
        self._mapping = {}  # maps each task to the partition it was assigned to
        self._to_leave_tasks = []  # tasks that will leave, kept sorted by leaving time
        self._task_counter = 0
        self._leaving_counter = 0
        self._time_now = 0  # does not handle the edge case where no tasks are passed
Code example #6
    def _buildPackages(self, buildThreads):
        statusEvent = threading.Event()
        self._initializeScheduler(statusEvent)
        self._initializeThreadPool(statusEvent)

        for i in range(0, buildThreads):
            workerName = "WorkerThread" + str(i)
            ThreadPool.addWorkerThread(workerName)
            ThreadPool.startWorkerThread(workerName)

        statusEvent.wait()
        Scheduler.stopScheduling = True
        self.logger.debug("Waiting for all remaining worker threads")
        ThreadPool.join_all()

        setFailFlag = False
        allPackagesBuilt = False
        if Scheduler.isAnyPackagesFailedToBuild():
            setFailFlag = True

        if Scheduler.isAllPackagesBuilt():
            allPackagesBuilt = True

        if setFailFlag:
            self.logger.error("Some of the packages failed:")
            self.logger.error(Scheduler.listOfFailedPackages)
            raise Exception("Failed during building package")

        if not setFailFlag:
            if allPackagesBuilt:
                self.logger.debug("All packages built successfully")
            else:
                self.logger.error("Build stopped unexpectedly.Unknown error.")
                raise Exception("Unknown error")
Code example #7
File: AutoOn.py Project: gertjanmaas/HueControl
 def __init__(self):
     self.http = httplib2.Http()
     self.config = ConfigurationServer.get('AutoOn')
     if self.config == None:
         self.config = {
             'lat': 0,
             'long': 0,
             'city': 'Enter a city here...',
             'offset': 0,
             'autoOnGroup': 'All Lights',
             'groupBri': 255,
         }
     else:
         if not 'autoOnGroup' in self.config.keys():
             self.config['autoOnGroup'] = 'All Lights'
         if not 'groupBri' in self.config.keys():
             self.config['groupBri'] = 255
             
     minute = random.randint(0,59)
     start_date = datetime.datetime.combine(datetime.datetime.today(), datetime.time(hour=12, minute=minute, second=0)) #added randomness to not break earthtools :)
     if start_date < datetime.datetime.now():
         # get the sunset for today, the get_sunset function will take care of the rest
         self.get_sunset()
         start_date += datetime.timedelta(days=1)
     Scheduler.add_interval_job(self.get_sunset, days=1, start_date=start_date)
Code example #8
 def __init__(self):
     self._executors = []
     self._num_executors = 0
     self._collector_ready = False
     self._res_list= []
     self._scheduler = Scheduler()
     self._logger = logging.getLogger("SparkContext")
Code example #9
File: QueueManager.py Project: aeslaughter/moose
    def __init__(self, harness, params):
        Scheduler.__init__(self, harness, params)

        # json storage
        self.__session_data = {}

        # a set containing any launched jobs
        self.__jobs = set([])

        # Open existing session file
        if os.path.exists(self.options.session_file):
            self.__status_check = True
            try:
                self.__session_file = open(self.options.session_file, 'r+')
                self.__session_data = json.load(self.__session_file)

                # Set some important things that affect findAndRunTests (input file, --re)
                json_args = self.getData('QUEUEMANAGER',
                                         options_regexp=True,
                                         options_input=True)
                self.options.reg_exp = json_args['options_regexp']
                self.options.input_file_name = json_args['options_input']

            except ValueError:
                raise QueueManagerError('Supplied session file: %s exists, but is not readable!' % (self.options.session_file))

        # session file does not exist. Create one instead.
        else:
            self.__status_check = False
            self.__session_file = self.__createSessionFile()
            self.putData('QUEUEMANAGER',
                         options_regexp=self.options.reg_exp,
                         options_input=self.options.input_file_name)

        self.params = params
Code example #10
    def configure(self, cfg_params):
        self.environment_unique_identifier = None
        self.cfg_params = cfg_params
        Scheduler.configure(self, cfg_params)
        self.jobtypeName = cfg_params['CRAB.jobtype']

        name = string.upper(self.name())
        self.queue = cfg_params.get(name + '.queue', None)

        self.res = cfg_params.get(name + '.resource', None)

        # Minimal padding time for jobs; disabled for local schedulers.
        # Added only for alignment (and testing) with Grid schedulers.
        self.minimal_job_duration = 0

        if (cfg_params.has_key(self.name() + '.env_id')):
            self.environment_unique_identifier = cfg_params[self.name() +
                                                            '.env_id']
        ## is this ok?
        localDomainName = getLocalDomain(self)
        localPSNs = getListOfPSNsForThisDomain(localDomainName)
        # turn list to a string as in crab.cfg
        localPSNs = ",".join(localPSNs)
        if not cfg_params.has_key('GRID.se_white_list'):
            cfg_params['GRID.se_white_list'] = localPSNs
            common.logger.info("Your domain name is " + str(localDomainName) +
                               ": only local dataset will be considered")
        else:
            common.logger.info("Your se_white_list is set to " +
                               str(cfg_params['GRID.se_white_list']) +
                               ": only local dataset will be considered")
        return
Code example #11
def run():
    s = Scheduler()
    for i in range(1, 11):
        s.add_task(dummy_test, (), {}, name='task_{}'.format(i))
    output = s.wait_for_completion()
    LOG.debug("Total wait from individual threads %d", sum(output.values()))
    print output
Code example #12
File: main.py Project: tanmayshankar/InViSyBlE
def runInViSyBlE():
    SIFTObjectDetector.loadDatabase("/home/venkat/Documents/Projects/InViSyBle/ObjectDatabase/")
    FaceRecognizer.loadDatabase("/home/venkat/Documents/Projects/InViSyBle/FaceDatabase/")

    #cap = cv2.VideoCapture(0)
    #getFrame = GetFrame()
    #getBWFrame = GetBWFrame()
    scheduler = Scheduler()
    scheduler.updateComputationList([GetFrame, SIFTObjectDetector.SIFTObjectDetector, FaceDetector, FaceRecognizer.FaceRecognizer])

    while(True):#cap.isOpened()):
        #ret, frame = cap.read()

        #gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        #frame = getFrame((0,0), None)
        #frame, frameId = getBWFrame(frame, None)
        res = scheduler.compute()
        if None in res:
            continue
        frame, frameId = res[0]
        detectedObjects = res[1]
        detectedFaces = res[3]

        #draw face rectangles
        faces = res[2]
        for (x,y,w,h) in faces:
            cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)

        cv2.imshow('frame',frame)
        print detectedObjects, detectedFaces
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    #cap.release()
    cv2.destroyAllWindows()
Code example #13
    def configure(self, cfg_params):
        self.cfg_params = cfg_params
        self.jobtypeName = cfg_params.get('CRAB.jobtype', '')
        self.schedulerName = cfg_params.get('CRAB.scheduler', '')
        Scheduler.configure(self, cfg_params)
        #self.proxyValid=0

        #self.dontCheckProxy=int(cfg_params.get("GRID.dont_check_proxy",0))
        self.space_token = cfg_params.get("USER.space_token", None)
        self.proxyServer = 'myproxy.cern.ch'
        #self.group = cfg_params.get("GRID.group", None)
        #self.role = cfg_params.get("GRID.role", None)

        removeBList = cfg_params.get("GRID.remove_default_blacklist", 0)
        blackAnaOps = None
        if int(removeBList) == 0:
            blacklist = Downloader(
                "http://cmsdoc.cern.ch/cms/LCG/crab/config/")
            result = blacklist.config("site_black_list.conf")
            if result != None:
                blackAnaOps = result
            common.logger.debug("Enforced black list: %s " % blackAnaOps)
        else:
            common.logger.info("WARNING: Skipping default black list!")

        self.EDG_ce_black_list = None
        if cfg_params.has_key(
                'GRID.ce_black_list') and cfg_params['GRID.ce_black_list']:
            self.EDG_ce_black_list = cfg_params.get('GRID.ce_black_list')
            if int(removeBList) == 0 and blackAnaOps:
                self.EDG_ce_black_list += ",%s" % blackAnaOps
        elif int(removeBList) == 0 and blackAnaOps:
            self.EDG_ce_black_list = blackAnaOps
        if self.EDG_ce_black_list:
            self.EDG_ce_black_list = str(self.EDG_ce_black_list).split(',')

        self.EDG_ce_white_list = cfg_params.get('GRID.ce_white_list', None)
        if (self.EDG_ce_white_list):
            self.EDG_ce_white_list = str(self.EDG_ce_white_list).split(',')

        self.VO = cfg_params.get('GRID.virtual_organization', 'cms')

        self.EDG_clock_time = cfg_params.get('GRID.max_wall_clock_time', None)

        # Default minimum CPU time to >= 130 minutes
        self.EDG_cpu_time = cfg_params.get('GRID.max_cpu_time', '130')

        ## Add EDG_WL_LOCATION to the python path
        #if not self.CRAB_useServer and not self.CRAB_serverName:
        #    if not os.environ.has_key('EDG_WL_LOCATION'):
        #        msg = "Error: the EDG_WL_LOCATION variable is not set."
        #        raise CrabException(msg)
        #    path = os.environ['EDG_WL_LOCATION']
        #    libPath=os.path.join(path, "lib")
        #    sys.path.append(libPath)
        #    libPath=os.path.join(path, "lib", "python")
        #    sys.path.append(libPath)

        self.checkProxy()
        return
Code example #14
    def setOutputs(self):
        self.output.clear()
        self.output_list.clear()
        pList = copy.deepcopy(self.pro_list)
        if self.algorithm == "Priority" and self.preemptive:
            self.output = Scheduler().priority_preemptive(pList)
        elif self.algorithm == "Priority" and not self.preemptive:
            self.output = Scheduler().priority_nonpreemptive(pList)
        elif self.algorithm == "SJF" and self.preemptive:
            self.output = Scheduler().SJF_Preemptive(pList)
        elif self.algorithm == "SJF" and not self.preemptive:
            self.output = Scheduler().SJF_nonPreemptive(pList)
        elif self.algorithm == "Round Robin":
            self.output = Scheduler().roundRobin(pList, self.quatum)

        if len(self.output):
            prev_tSlot = self.output[0]
            duration = 0
            for tSlot in self.output:
                if tSlot != prev_tSlot:
                    self.output_list.append({
                        "Name": prev_tSlot,
                        "duration": duration
                    })
                    duration = 0
                if tSlot != "NOP":
                    duration += 1
                    prev_tSlot = tSlot
            if prev_tSlot != "NOP":
                self.output_list.append({
                    "Name": prev_tSlot,
                    "duration": duration
                })
Code example #15
 def __init__(self, controller, evtProcessor):
     '''
     Constructor
     '''
     self.controller = controller
     self.eventProcessor = evtProcessor
     self.scheduler = Scheduler()
Code example #16
File: StartHueControl.py Project: Dragor2/HueControl
 def startup(self, bridge_ip):
     from Plugins import Plugins
     from HueBridge import HueBridge
     HueBridge.init(bridge_ip)
     HueBridge.update()
     self.plugins = Plugins #expose it for cherrypy
     # Automatically update every minute
     Scheduler.add_interval_job(HueBridge.update, minutes=1)
Code example #17
 def startup(self, bridge_ip):
     from Plugins import Plugins
     from HueBridge import HueBridge
     HueBridge.init(bridge_ip)
     HueBridge.update()
     self.plugins = Plugins  #expose it for cherrypy
     # Automatically update every minute
     Scheduler.add_interval_job(HueBridge.update, minutes=1)
Code example #18
    def _buildGivenPackages(self, listPackages, buildThreads):
        # Extend listPackages from ["name1", "name2",..] to ["name1-vers1", "name2-vers2",..]
        listPackageNamesAndVersions = []
        for pkg in listPackages:
            for version in SPECS.getData().getVersions(pkg):
                listPackageNamesAndVersions.append(pkg + "-" + version)

        if constants.rpmCheck:
            listMakeCheckPackages = set()
            for pkg in listPackages:
                version = SPECS.getData().getHighestVersion(pkg)
                listMakeCheckPackages.add(pkg + "-" + version)
            alreadyBuiltRPMS = self._readAlreadyAvailablePackages()
            listPackageNamesAndVersions = (list(
                set(listPackageNamesAndVersions)
                | (listMakeCheckPackages - alreadyBuiltRPMS)))

        returnVal = self._calculateParams(listPackageNamesAndVersions)
        if not returnVal:
            self.logger.error(
                "Unable to set paramaters. Terminating the package manager.")
            raise Exception("Unable to set paramaters")

        statusEvent = threading.Event()
        self._initializeScheduler(statusEvent)
        self._initializeThreadPool(statusEvent)

        for i in range(0, buildThreads):
            workerName = "WorkerThread" + str(i)
            ThreadPool.addWorkerThread(workerName)
            ThreadPool.startWorkerThread(workerName)

        statusEvent.wait()
        Scheduler.stopScheduling = True
        self.logger.info("Waiting for all remaining worker threads")
        ThreadPool.join_all()

        setFailFlag = False
        allPackagesBuilt = False
        if Scheduler.isAnyPackagesFailedToBuild():
            setFailFlag = True

        if Scheduler.isAllPackagesBuilt():
            allPackagesBuilt = True

        if setFailFlag:
            self.logger.error("Some of the packages failed:")
            self.logger.error(Scheduler.listOfFailedPackages)
            raise Exception("Failed during building package")

        if not setFailFlag:
            if allPackagesBuilt:
                self.logger.info("All packages built successfully")
            else:
                self.logger.error("Build stopped unexpectedly.Unknown error.")
                raise Exception("Unknown error")

        self.logger.info("Terminated")
Code example #19
 def __init__(self, name, layers_string):
     self.name = name
     self.node_list = []
     self.nova_client = NovaClient.get_instance()
     self.instance_list = []
     self.protected_layers_string = layers_string
     self.config = ConfigParser.RawConfigParser()
     self.config.read('/etc/hass.conf')
     self.scheduler = Scheduler()
Code example #20
File: SchedulerGrid.py Project: belforte/CRAB2
    def configure(self, cfg_params):
        self.cfg_params = cfg_params
        self.jobtypeName   = cfg_params.get('CRAB.jobtype','')
        self.schedulerName = cfg_params.get('CRAB.scheduler','')
        Scheduler.configure(self,cfg_params)
        #self.proxyValid=0

        #self.dontCheckProxy=int(cfg_params.get("GRID.dont_check_proxy",0)) 	 
        self.space_token = cfg_params.get("USER.space_token",None) 	 
        self.proxyServer= 'myproxy.cern.ch'
        #self.group = cfg_params.get("GRID.group", None) 	 
        #self.role = cfg_params.get("GRID.role", None)

        removeBList = cfg_params.get("GRID.remove_default_blacklist", 0 )
        blackAnaOps = None
        if int(removeBList) == 0:
            blacklist = Downloader("http://cmsdoc.cern.ch/cms/LCG/crab/config/")
            result = blacklist.config("site_black_list.conf")
            if result != None:
                blackAnaOps = result
            common.logger.debug("Enforced black list: %s "%blackAnaOps)
        else:
            common.logger.info("WARNING: Skipping default black list!")

        self.EDG_ce_black_list = None
        if cfg_params.has_key('GRID.ce_black_list') and cfg_params['GRID.ce_black_list']:
            self.EDG_ce_black_list = cfg_params.get('GRID.ce_black_list')
            if int(removeBList) == 0 and blackAnaOps: 
                self.EDG_ce_black_list += ",%s"%blackAnaOps
        elif int(removeBList) == 0 and blackAnaOps:
            self.EDG_ce_black_list = blackAnaOps
        if self.EDG_ce_black_list:
            self.EDG_ce_black_list = str(self.EDG_ce_black_list).split(',')

        self.EDG_ce_white_list = cfg_params.get('GRID.ce_white_list',None)
        if (self.EDG_ce_white_list): self.EDG_ce_white_list = str(self.EDG_ce_white_list).split(',')

        self.VO = cfg_params.get('GRID.virtual_organization','cms')

        self.EDG_clock_time = cfg_params.get('GRID.max_wall_clock_time',None)

        # Default minimum CPU time to >= 130 minutes
        self.EDG_cpu_time = cfg_params.get('GRID.max_cpu_time', '130')

        ## Add EDG_WL_LOCATION to the python path
        #if not self.CRAB_useServer and not self.CRAB_serverName:
        #    if not os.environ.has_key('EDG_WL_LOCATION'):
        #        msg = "Error: the EDG_WL_LOCATION variable is not set."
        #        raise CrabException(msg)
        #    path = os.environ['EDG_WL_LOCATION']
        #    libPath=os.path.join(path, "lib")
        #    sys.path.append(libPath)
        #    libPath=os.path.join(path, "lib", "python")
        #    sys.path.append(libPath)

        self.checkProxy()
        return
Code example #21
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('config')
    args = parser.parse_args()
    config = Config(args.config)
    scheduler = Scheduler()
    schedule_list = scheduler.run(config)
    for Gp in schedule_list:
        Gp.view()
Code example #22
def start_scheduler_service():
    """Starts the Scheduler component"""

    sched = Scheduler(maxJobs=MAX_JOBS,
                      unitCPU=CPU_UNIT,
                      unitMem=MEM_UNIT,
                      maxCPU=MAX_CPU,
                      portLower=PORT_RANGE_LOWER,
                      portUpper=PORT_RANGE_UPPER,
                      strategy=STRATEGY)
    sched.start()
Code example #23
def test_get_percent_observed_targets():
    sen1 = Sensor(2, 2, Point(0, 0))
    sen2 = Sensor(2, 2, Point(1, 0))
    tar1 = Target(Point(1, 1))
    tar2 = Target(Point(4, 4))
    tar4 = Target(Point(5, 5))
    tar3 = Target(Point(3, 0))
    sensor_list = [sen1, sen2]
    targets_list = [tar1, tar2, tar3, tar4]
    a = Scheduler(sensor_list, targets_list, 2, 2)
    assert a.get_percent_observed_targets() == 50
Code example #24
 def __init__(self, name):
     Scheduler.__init__(self,name)
     self.states = [ "Acl", "cancelReason", "cancelling","ce_node","children", \
                   "children_hist","children_num","children_states","condorId","condor_jdl", \
                   "cpuTime","destination", "done_code","exit_code","expectFrom", \
                   "expectUpdate","globusId","jdl","jobId","jobtype", \
                   "lastUpdateTime","localId","location", "matched_jdl","network_server", \
                   "owner","parent_job", "reason","resubmitted","rsl","seed",\
                   "stateEnterTime","stateEnterTimes","subjob_failed", \
                   "user tags" , "status" , "status_code","hierarchy"]
     return
Code example #25
File: PackageManager.py Project: vMarkus/photon
    def buildGivenPackages(self, listPackages):
        returnVal = self.calculateParams(listPackages)
        if not returnVal:
            self.logger.error(
                "Unable to set paramaters. Terminating the package manager.")
            return False

        statusEvent = threading.Event()
        numWorkerThreads = self.calculatePossibleNumWorkerThreads()
        if numWorkerThreads > 8:
            numWorkerThreads = 8
        if numWorkerThreads == 0:
            return False

        self.initializeScheduler(statusEvent)
        self.initializeThreadPool(statusEvent)

        i = 0
        while i < numWorkerThreads:
            workerName = "WorkerThread" + str(i)
            ThreadPool.addWorkerThread(workerName)
            ThreadPool.startWorkerThread(workerName)
            i = i + 1

        statusEvent.wait()
        Scheduler.stopScheduling = True
        self.logger.info("Waiting for all remaining worker threads")
        listWorkerObjs = ThreadPool.getAllWorkerObjects()
        for w in listWorkerObjs:
            w.join()

        setFailFlag = False
        allPackagesBuilt = False

        if Scheduler.isAnyPackagesFailedToBuild():
            setFailFlag = True

        if Scheduler.isAllPackagesBuilt():
            allPackagesBuilt = True

        if setFailFlag:
            self.logger.error("Some of the packages failed:")
            self.logger.error(Scheduler.listOfFailedPackages)
            return False

        if not setFailFlag:
            if allPackagesBuilt:
                self.logger.info("All packages built successfully")
            else:
                self.logger.error("Build stopped unexpectedly.Unknown error.")
                return False

        self.logger.info("Terminated")
        return True
Code example #26
 def __init__(self, name):
     Scheduler.__init__(self, name)
     self.states = [ "Acl", "cancelReason", "cancelling","ce_node","children", \
                   "children_hist","children_num","children_states","condorId","condor_jdl", \
                   "cpuTime","destination", "done_code","exit_code","expectFrom", \
                   "expectUpdate","globusId","jdl","jobId","jobtype", \
                   "lastUpdateTime","localId","location", "matched_jdl","network_server", \
                   "owner","parent_job", "reason","resubmitted","rsl","seed",\
                   "stateEnterTime","stateEnterTimes","subjob_failed", \
                   "user tags" , "status" , "status_code","hierarchy"]
     return
Code example #27
 def main(self):
     scrape = Scraper(self.from_ct, self.date, self.time)
     data = scrape.fetch_full()
     nj = NJParser(data[0]).parse_data()
     ct = CTParser(data[1]).parse_data()
     if self.from_ct:
         schedule = Scheduler(ct, nj, self.low, self.high).generate()
     else:
         schedule = Scheduler(nj, ct, self.low, self.high).generate()
     message = "Train schedules for " + self.date.strftime('%Y-%m-%d')
     Emailer(self.password, self.email,
             self.file).send_email(message + ":\n" + schedule, message)
Code example #28
File: PackageManager.py Project: gijs/photon-1
 def buildGivenPackages (self, listPackages):
     returnVal=self.calculateParams(listPackages)
     if not returnVal:
         self.logger.error("Unable to set paramaters. Terminating the package manager.")
         return False
     
     statusEvent=threading.Event()
     numWorkerThreads=self.calculatePossibleNumWorkerThreads()
     if numWorkerThreads > 8:
         numWorkerThreads = 8
     if numWorkerThreads == 0:
         return False
      
     self.initializeScheduler(statusEvent)
     self.initializeThreadPool(statusEvent)
     
     i=0
     while i < numWorkerThreads:
         workerName="WorkerThread"+str(i)
         ThreadPool.addWorkerThread(workerName)
         ThreadPool.startWorkerThread(workerName)
         i = i + 1
     
     statusEvent.wait()
     Scheduler.stopScheduling=True
     self.logger.info("Waiting for all remaining worker threads")
     listWorkerObjs=ThreadPool.getAllWorkerObjects()
     for w in listWorkerObjs:
         w.join()
         
     setFailFlag=False
     allPackagesBuilt=False
     
     if Scheduler.isAnyPackagesFailedToBuild():
         setFailFlag=True
     
     if Scheduler.isAllPackagesBuilt():
         allPackagesBuilt=True
     
     if setFailFlag:
         self.logger.error("Some of the packages failed:")
         self.logger.error(Scheduler.listOfFailedPackages)
         return False
     
     if not setFailFlag:
         if allPackagesBuilt:
             self.logger.info("All packages built successfully")
         else:
             self.logger.error("Build stopped unexpectedly.Unknown error.")
             return False
     
     self.logger.info("Terminated")
     return True
Code example #29
File: __main__.py Project: SimonJPegg/py_temp
def main():
    temp_mon = TemperatureMonitor(
        error_temp=ERROR_TEMP,
        min_temp=MIN_TEMP,
        max_temp=MAX_TEMP,
        set_heating_below_max=SET_HEATING_ON_BELOW_MAX)
    heating_controller = HeatingController(
        gpio_output_downstairs=GPIO_OUTPUT_DOWNSTAIRS,
        gpio_output_upstairs=GPIO_OUTPUT_UPSTAIRS,
        temp_monitor=temp_mon)
    scheduler = Scheduler(heating_controller=heating_controller)
    scheduler.start()
Code example #30
File: PackageManager.py Project: one3chens/photon
    def buildGivenPackages(self, listPackages, buildThreads):
        if constants.rpmCheck:
            alreadyBuiltRPMS = self.readAlreadyAvailablePackages()
            listPackages = list(
                set(listPackages)
                | (set(constants.listMakeCheckRPMPkgtoInstall) -
                   set(alreadyBuiltRPMS)))

        returnVal = self.calculateParams(listPackages)
        if not returnVal:
            self.logger.error(
                "Unable to set paramaters. Terminating the package manager.")
            raise Exception("Unable to set paramaters")

        statusEvent = threading.Event()
        self.initializeScheduler(statusEvent)
        self.initializeThreadPool(statusEvent)

        i = 0
        while i < buildThreads:
            workerName = "WorkerThread" + str(i)
            ThreadPool.addWorkerThread(workerName)
            ThreadPool.startWorkerThread(workerName)
            i = i + 1

        statusEvent.wait()
        Scheduler.stopScheduling = True
        self.logger.info("Waiting for all remaining worker threads")
        listWorkerObjs = ThreadPool.getAllWorkerObjects()
        for w in listWorkerObjs:
            w.join()

        setFailFlag = False
        allPackagesBuilt = False
        if Scheduler.isAnyPackagesFailedToBuild():
            setFailFlag = True

        if Scheduler.isAllPackagesBuilt():
            allPackagesBuilt = True

        if setFailFlag:
            self.logger.error("Some of the packages failed:")
            self.logger.error(Scheduler.listOfFailedPackages)
            raise Exception("Failed during building package")

        if not setFailFlag:
            if allPackagesBuilt:
                self.logger.info("All packages built successfully")
            else:
                self.logger.error("Build stopped unexpectedly.Unknown error.")
                raise Exception("Unknown error")

        self.logger.info("Terminated")
Code example #31
File: env.py Project: DDeChoU/RLonTM
 def simulate_best_fit(self):
     tl = copy.deepcopy(self._task_list)
     pl = copy.deepcopy(self._partition_list)
     m = Model()
     m.reset(tl, pl)
     s = Scheduler('best_fit')
     for task in tl:
         action = s.schedule(task, pl)
         _, _, done, _ = m.step(action)
         if done:
             break
     return m.get_unit_ratio()
Code example #32
def main():
    while True:
        if thread_active:
            data = camera_config
            switch_flag = 0
            my_schedule = Scheduler(data)
            logging.info("Loaded Scheduler")
            sleep(3)
            while thread_active:
                my_schedule.update_current_time()
                slot = my_schedule.should_start()
                if switch_flag == 0:
                    logging.info("Stop: " + str(datetime.now()))
                    switch_flag = 1
Code example #33
    def process_gateway_transaction_pools():
        tx_token_time = 0.0

        # Loop, processing all the transactions in the system
        while not BlockCommit.transcations_procesed():

            tx_list_inserted = False

            # Randomly allocate the transaction token to a gateway
            gateway_node = random.choice(p.NODES[0:p.Gn])

            # Sort the transactions by receive time in ascending order
            gateway_node.transactionsPool.sort(key=lambda tx: tx.timestamp[1])
            tx_pool_size = len(gateway_node.transactionsPool)
            tx_list = []

            # Any transactions in the pool?
            if tx_pool_size > 0:
                tx_count = min(p.txListSize, tx_pool_size)

                # Append any valid transaction to the transaction list
                for tx in gateway_node.transactionsPool[0:tx_count]:
                    if tx.timestamp[1] <= tx_token_time:
                        tx_list.append(tx)

                # If there are transactions in the list, schedule append and propagation events
                if len(tx_list) > 0:
                    Scheduler.append_tx_list_event(tx_list, gateway_node.id,
                                                   tx_token_time, 0)
                    BlockCommit.schedule_event_prop_tx_list(
                        tx_list, gateway_node.gatewayIds, tx_token_time)

                    # Remove transactions from local transaction pool
                    for tx in tx_list:
                        gateway_node.transactionsPool.remove(tx)

                    if p.maxTxListSize < len(tx_list):
                        p.maxTxListSize = len(tx_list)
                    tx_list_inserted = True

            # Release the transaction token
            if tx_list_inserted:
                tx_token_time = tx_token_time + Network.tx_list_prop_delay() + \
                    Network.tx_token_release_delay()
            else:
                tx_token_time = tx_token_time + Network.tx_token_release_delay()

        # Process all the transaction events in the queue
        BlockCommit.process_queue()
Code example #34
def run_main():
    if cfg.RUNNING_ON_HEROKU:
        print "Starting in HEROKU mode"

    lg = logger.Logger()

    print "logging in reddit"
    bot = ParenthesesBot()
    bot.login()

    subreddit = bot.r.get_subreddit("+".join(cfg.WATCHED_SUBREDDITS))

    sc = Scheduler()
    sc.set_job_interval("poster", cfg.TIMING_POSTER_CYCLE)
    sc.set_job_interval("check_config", cfg.TIMING_CONFIG_CHECK)
    sc.set_job_interval("report_stats", cfg.TIMING_REPORT_STATS)
    sc.set_job_interval("check_inbox", cfg.TIMING_CHECK_INBOX)

    while True:

        try:
            print "Starting cycle"
            run_mainloop(bot, subreddit, lg, sc)

        except Exception as e:
            lg.log(str(e), logger.CRITICAL)
            time.sleep(cfg.TIMING_ERROR_RESTART)

    return
Code example #35
File: AutoOn.py Project: gertjanmaas/HueControl
 def get_sunset(self):
     # 0,0 is a gps coordinate somewhere in the South Atlantic Ocean, hopefully nobody uses Hue there :)
     if self.config['lat'] != 0 and self.config['long'] != 0:
         now = datetime.datetime.now()
         request_url = "{}/{}/{}/{}/{}/99/1".format(EARTHTOOLS_URL, self.config['lat'], self.config['long'], now.day, now.month)
         resp, content = self.http.request(request_url, method="GET")
         if int(resp['status']) == 200:
             xml = ElementTree.fromstring(content)
             sunset = xml.find(".//evening/sunset")
             sunset_time = time.strptime(sunset.text, "%H:%M:%S")
             sunset_datetime = datetime.datetime(now.year, now.month, now.day, sunset_time.tm_hour, sunset_time.tm_min, sunset_time.tm_sec) + datetime.timedelta(minutes=self.config['offset'])
             if sunset_datetime > datetime.datetime.now():
                 cherrypy.log("AutoOn: Turning lights on @ {}".format(sunset_datetime))
                 Scheduler.add_date_job(self.turn_lights_on, sunset_datetime)
Code example #36
File: PackageManager.py Project: megacoder/photon
    def buildGivenPackages (self, listPackages, buildThreads):
        if constants.rpmCheck:
            alreadyBuiltRPMS=self.readAlreadyAvailablePackages()
            listPackages=list(set(listPackages)|(set(constants.listMakeCheckRPMPkgtoInstall)-set(alreadyBuiltRPMS)))

        returnVal=self.calculateParams(listPackages)
        if not returnVal:
            self.logger.error("Unable to set paramaters. Terminating the package manager.")
            raise Exception("Unable to set paramaters")

        statusEvent=threading.Event()
        self.initializeScheduler(statusEvent)
        self.initializeThreadPool(statusEvent)

        i=0
        while i < buildThreads:
            workerName="WorkerThread"+str(i)
            ThreadPool.addWorkerThread(workerName)
            ThreadPool.startWorkerThread(workerName)
            i = i + 1

        statusEvent.wait()
        Scheduler.stopScheduling=True
        self.logger.info("Waiting for all remaining worker threads")
        listWorkerObjs=ThreadPool.getAllWorkerObjects()
        for w in listWorkerObjs:
            w.join()

        setFailFlag=False
        allPackagesBuilt=False
        if Scheduler.isAnyPackagesFailedToBuild():
            setFailFlag=True

        if Scheduler.isAllPackagesBuilt():
            allPackagesBuilt=True

        if setFailFlag:
            self.logger.error("Some of the packages failed:")
            self.logger.error(Scheduler.listOfFailedPackages)
            raise Exception("Failed during building package")

        if not setFailFlag:
            if allPackagesBuilt:
                self.logger.info("All packages built successfully")
            else:
                self.logger.error("Build stopped unexpectedly.Unknown error.")
                raise Exception("Unknown error")

        self.logger.info("Terminated")
Code example #37
 def __init__(self, rho, seed=None):
     """Starts the simulator using 'rho' as its utilization.
     This parameter cannot be changed later"""
     self.__rho = rho
     self.__start_time = 0  # start_time of simulation rounds
     self.__current_time = 0  # current time of the simulator (state variable kept over function calls)
     self.__queue = []  # queue of the simulator (state variable kept over function calls)
     self.__server_idle = True  # state of the server of the simulator (state variable kept over function calls)
     self.__scheduler = Scheduler(rho, seed)  # Scheduler (state variable kept over function calls)
     self.__waiting_times = []  # list of waiting times of the customers, for statistics
     self.__areas = []  # list of number of waiting customers, for statistics
Code example #38
def notifyPackageBuildCompleted():
    logger.disabled = False
    if 'status' not in flask.request.json or 'package' not in flask.request.json:
        return {'message': 'missing package or status in request'}, BAD_REQUEST

    if flask.request.json['status'] == 0:
        Scheduler.notifyPackageBuildCompleted(flask.request.json['package'])
        logger.info("Build Success %s"%flask.request.json['package'])
    elif flask.request.json['status'] == -1:
        Scheduler.notifyPackageBuildFailed(flask.request.json['package'])
        logger.info("Build Failed %s"%flask.request.json['package'])
    else:
        return {'message': 'wrong status'}, NOT_ACCEPTABLE
    logger.disabled = True
    return {'message': 'master notified successfully'}, SUCCESS
Code example #39
    def __init__(self, justPlots = False):
        self.__name__ = "Core"
        
        self.configManager = ConfigurationManager()
        
        # These return True or False depending on whether loading the conf was a success.
        # It should be checked if the conf was loaded successfully and failures should be logged.
        self.configManager.loadConf(CONFIG_CORE, True)
        self.configManager.loadConf(CONFIG_SETTINGS, True)
        self.configManager.loadConf(CONFIG_FORMS, True)
        self.configManager.loadConf(CONFIG_URLMAP, True)
        self.configManager.loadConf(CONFIG_MESSAGES, True)
        
        self.moduleManager = ModuleManager(self)
        self.settingsManager = SettingsManager(self)
        self.clientManager = ClientManager(self)
        self.sensorManager = SensorManager(self)
        self.deviceManager = DeviceManager(self)
        self.taskManager = TaskManager(self)
        self.messageManager = MessageManager(self)
        self.logging = Logging(self)

        if self.settingsManager.equals("plottype", "matplotlib"):
            from Plot import Plot
            self.plot = Plot(self)

        self.protocol = Protocol(self)
        if not justPlots: self.connection = Connection(self)
        if not justPlots: self.scheduler = Scheduler()
        if not justPlots: self.webServer = WebServer(self.connection.getLocalIP(), self.settingsManager.getValueByName("listenport")) # Currently binds to localhost. But this needs to be fixed so other connections can be listened to too.
Code example #40
    def __init__(self, portTypes = {} ):
        Scheduler.__init__(self)
        self.motors = { 'A': Motor(BP.PORT_A, self), 'B': Motor(BP.PORT_B, self), 'C': Motor(BP.PORT_C, self), 'D': Motor(BP.PORT_D, self) }
        self.sensors = {  }
        BP.BrickPiSetup()  # setup the serial port for communication

        for port, sensorType in portTypes.items():
            if isinstance(sensorType, int):
                sensor = Sensor(port, sensorType)
            else:
                sensor = sensorType(port)
            self.sensors[sensor.idChar] = sensor
            BP.BrickPi.SensorType[sensor.port] = sensor.type
        BP.BrickPiSetupSensors()       #Send the properties of sensors to BrickPi

        self.setUpdateCoroutine( self.updaterCoroutine() )
Code example #41
File: UI.py Project: calebawatts/Shift-Scheduler
    def __start(self, month=None, year=None):
        '''
            Does basic configuration from user input
        '''
        if (month == None or year == None) or (month not in range(1,13) and year <= 0):
            self.printTitle()
            print("Let's get started by filling out some basic information:")
            self.year = self.getInputOfType("Enter the current year: ", 'int', range(0,2500))
            self.month = self.getInputOfType("Enter the current month (numeric): ", 'int', range(1,13))

        self.scheduler = Scheduler(self.month, self.year)

        name = self.getInputOfType("[Optional] Enter name of your restaurant: ", "str", range(0,20), True)
        if name != "skip":
            self.schedName = name
        
        self.printTitle()
        print("Scheduler configured for %s %s"%(self.monthNames[self.month], self.year))
        print("")
        print("Here is an overview of the month:")
        self.printCalendar()
        print("")
        print("You should start by adding Employees and then specifying availability/rules for each employee. " +
            "Then set shifts for each day/week of the month.")
        print("")
        print("Type help at any time for help using this app.\n")
Code example #42
File: QueueManager.py Project: zachmprince/moose
    def __init__(self, harness, params):
        Scheduler.__init__(self, harness, params)

        # json storage
        self.__session_data = {}

        # Open existing session file
        if os.path.exists(self.options.session_file):
            self.__status_check = True
            try:
                self.__session_file = open(self.options.session_file, 'r+')
                self.__session_data = json.load(self.__session_file)

                # Set some important things that affect findAndRunTests (input file, --re)
                json_args = self.getData('QUEUEMANAGER',
                                         options_regexp=True,
                                         options_input=True,
                                         options_timing=True)
                self.options.input_file_name = json_args['options_input']

                # Honor any new reg_exp supplied by the user
                if self.options.reg_exp:
                    pass
                else:
                    self.options.reg_exp = json_args['options_regexp']

                # Only allow timing if user is asking, and user supplied those options
                # during initial launch phase (otherwise perflog will not be available).
                if not json_args['options_timing'] and self.options.timing:
                    self.options.timing = False


            except ValueError:
                print('Supplied session file: %s exists, but is not readable!' % (self.options.session_file))
                sys.exit(1)

        # session file does not exist. Create one instead.
        elif not self.options.queue_cleanup:
            self.__status_check = False
            self.__session_file = self.__createSessionFile()
            self.putData('QUEUEMANAGER',
                         options_regexp=self.options.reg_exp,
                         options_input=self.options.input_file_name,
                         options_timing=self.options.timing)

        self.params = params
Code example #43
File: PackageManager.py Project: frapposelli/photon
    def _buildGivenPackages(self, listPackages, buildThreads):
        # Extend listPackages from ["name1", "name2",..] to ["name1-vers1", "name2-vers2",..]
        listPackageNamesAndVersions=set()
        for pkg in listPackages:
            base = SPECS.getData().getSpecName(pkg)
            for version in SPECS.getData().getVersions(base):
                listPackageNamesAndVersions.add(base+"-"+version)

        returnVal = self._calculateParams(listPackageNamesAndVersions)
        if not returnVal:
            self.logger.error("Unable to set parameters. Terminating the package manager.")
            raise Exception("Unable to set parameters")

        statusEvent = threading.Event()
        self._initializeScheduler(statusEvent)
        self._initializeThreadPool(statusEvent)

        for i in range(0, buildThreads):
            workerName = "WorkerThread" + str(i)
            ThreadPool.addWorkerThread(workerName)
            ThreadPool.startWorkerThread(workerName)

        statusEvent.wait()
        Scheduler.stopScheduling = True
        self.logger.debug("Waiting for all remaining worker threads")
        ThreadPool.join_all()

        setFailFlag = False
        allPackagesBuilt = False
        if Scheduler.isAnyPackagesFailedToBuild():
            setFailFlag = True

        if Scheduler.isAllPackagesBuilt():
            allPackagesBuilt = True

        if setFailFlag:
            self.logger.error("Some of the packages failed:")
            self.logger.error(Scheduler.listOfFailedPackages)
            raise Exception("Failed during building package")

        if not setFailFlag:
            if allPackagesBuilt:
                self.logger.debug("All packages built successfully")
            else:
                self.logger.error("Build stopped unexpectedly.Unknown error.")
                raise Exception("Unknown error")
Code example #44
File: CPU.py Project: mtarias/soyredes
 def __init__(self, conn):
     super(CPU, self).__init__()
     self.conn = conn
     self.pid_count = 0
     self.count = 0
     Memory.set_up()
     self.scheduler = Scheduler()
     self.current_process = None
Code example #45
File: Ghoul.py Project: ThreeDRadio/GraveyardGhoul
    def __init__(self):
        configStream = file('config.yaml', 'r')
        config = yaml.load(configStream)

        self.libraryDB = psycopg2.connect(host = config['music_database']['host'],
                                     user = config['music_database']['user'],
                                     password = config['music_database']['password'],
                                     database = config['music_database']['database'])
        
        try:
            self.messageDB = psycopg2.connect(host = config['msg_database']['host'],
                                     user = config['msg_database']['user'],
                                     password = config['msg_database']['password'],
                                     database = config['msg_database']['database'])
        
            self.messages = MessageLibrary(self.messageDB)
            self.messages.setStingCategories(config['messages']['sting_categories'])
            PlayItem.Message.basePath = config['file_manager']['message_base_path']
        except TypeError:
            self.messages = None
        
        if config['file_manager']['mode'] == "external":
            self.fm = ExternalFileManager(config['file_manager']['user_id'], 
                             config['file_manager']['password'],
                             config['file_manager']['httpUser'],
                             config['file_manager']['httpPass'])

        elif config['file_manager']['mode'] == 'local':
            self.fm = LocalFileManager(config['file_manager']['music_base_path'])


        self.library = MusicLibrary(self.libraryDB)
        self.library.setAustralianNames(config['music']['aus_names'])
        self.library.setMaxSongLength(config['music']['max_song_length'])
        PlayItem.Song.ausNames = config['music']['aus_names']
        
        self.logger = PlaylistLogger(config['logger']['auth'], config['logger']['baseURL'],config['logger']['showID'])
        
        self.playQueue = Queue(5)
       
        self.scheduler = Scheduler(self.library, self.messages, self.fm, self.playQueue)
        self.scheduler.setDemoQuota(config['scheduler']['quotas']['demo'])
        self.scheduler.setLocalQuota(config['scheduler']['quotas']['local'])
        self.scheduler.setAusQuota(config['scheduler']['quotas']['aus'])
        self.scheduler.setFemaleQuota(config['scheduler']['quotas']['female'])
        self.scheduler.setConsecutiveSongs(config['scheduler']['consecutive_songs']['min'],
                                      config['scheduler']['consecutive_songs']['max'])

        self.scheduler.addListener(self)

        self.player = Player.Player()
        self.playThread = Player.PlayThread(self.player, self.playQueue)
        self.playThread.addListener(self)

        self.paused = False

        self.listeners = list()
Code example #46
    def configure(self, cfg_params):
        self.cfg_params = cfg_params
        self.jobtypeName   = cfg_params.get('CRAB.jobtype','')
        self.schedulerName = cfg_params.get('CRAB.scheduler','')
        Scheduler.configure(self,cfg_params)
        self.proxyValid=0

        self.dontCheckProxy=int(cfg_params.get("GRID.dont_check_proxy",0)) 	 
        self.space_token = cfg_params.get("USER.space_token",None) 	 
        try:
            self.proxyServer = Downloader("http://cmsdoc.cern.ch/cms/LCG/crab/config/").config("myproxy_server.conf")
            self.proxyServer = self.proxyServer.strip()
            if self.proxyServer is None:
                raise CrabException("myproxy_server.conf retrieved but empty")
        except Exception, e:
            common.logger.info("Problem setting myproxy server endpoint: using myproxy.cern.ch")
            common.logger.debug(e)
            self.proxyServer= 'myproxy.cern.ch'
Code example #47
File: Crank.py Project: almlab/crank
 def __init__(self, \
              species_tree_structure,\
              gene_trees_file_location,\
              penalty_dict,\
              output_directory,\
              spr_search_width,\
              nni_search_width,\
              max_iterations,\
              job_queue,\
              erase_previous_run = True,\
              correct_tree_structure = None,\
              use_darwin  = True,\
              use_albertyw = True,\
              use_mitmunc = True,\
              species_trees_per_job = 2,\
             ):
     """
     Initialize Variables then check if there is already previous run data
     species_tree_structure - the structure of the species tree
     gene_trees_file_location - the location of the Gene Trees File
     penalty_dict - A dictionary of AnGST penalties
     output_directory - The directory to output all data to
     spr_search_width - The max number of sprs to try per iteration
     nni_search_width - The max number of nnis to try per iteration
     max_iterations - The maximum number of iterations to run Crank
     erase_previous_run - Whether to erase data from previous runs
     correct_tree_structure - The structure of the correct tree to compare to
     """
     self.gene_trees           = GenesFile(gene_trees_file_location)
     self.species_tree         = SpeciesTree(species_tree_structure)
     self.penalty_dict         = penalty_dict
     self.output_directory     = output_directory
     self.spr_search_width     = spr_search_width
     self.nni_search_width     = nni_search_width
     self.iteration_number     = 1
     self.max_iterations       = max_iterations
     self.max_operations_per_iteration = 2
     self.reverse_spr          = True
     self.species_tree_history = [str(self.species_tree)]
     self.log_file_location    = self.output_directory+"/output.txt"
     self.shell_file_directory = self.output_directory +"/shell/"
     self.scheduler            = Scheduler(job_queue, \
                                           self.shell_file_directory,\
                                           use_darwin,\
                                           use_albertyw,\
                                           use_mitmunc,\
                                           gene_trees_file_location,\
                                           species_trees_per_job)
     if correct_tree_structure == None:
         self.true_tree        = None
     else:
         self.true_tree        = SpeciesTree(correct_tree_structure)
     self.statistics           = CrankStatistics(self.gene_trees, \
         self.output_directory+"/stats", self.true_tree)
     self.memtest_location = self.output_directory+"/meminfo.txt"
     self.__load_run_data(erase_previous_run)
Code example #48
File: QueueManager.py Project: zachmprince/moose
    def reserveSlots(self, job):
        """
        Inherited method which controls when jobs are allowed to execute,
        depending on available resources.

        QueueManager only executes third party queueing commands. So
        modify every job to only require 1 process.
        """
        job.setSlots(1)
        return Scheduler.reserveSlots(self, job)
Code example #49
 def listMatch(self, dest, full):
     matching='fast'
     ces=Scheduler.listMatch(self, dest, full)
     sites=[]
     for ce in ces:
         site=ce.split(":")[0]
         if site not in sites:
             sites.append(site)
         pass
     if full == True: matching='full'
     common.logger.debug("list of available site ( "+str(matching) +" matching ) : "+str(sites))
     return sites
Code example #50
File: TestFile.py Project: nightwolf55la/CS206
	def test6(self):
		self.initScheduler()
		dummyCourse = self.genDummyCourse()
		data = "CS", "53"
		
		oldAllCourses = Scheduler.allCourses
		Scheduler.allCourses = [dummyCourse]
		try:
			found = Scheduler.getCourse(data, [dummyCourse])
		except:
			assert False, "test 7 failed: exception"
		finally:
			Scheduler.allCourses = oldAllCourses
		
		if found.ID != data[1] or found.major != data[0]:
			print "getCourse() expected: ",data[0],data[1]
			print "getCourse() actual: ",found.major,found.ID
			assert False, "test 7 failed: getCourse error"
Code example #51
class TestScheduler(unittest.TestCase):

    def setUp(self):
        self.pcb1 = PCB(3, 5, 20, 40, 4)
        self.pcb2 = PCB(8, 10, 25, 42, 3)
        self.pcb3 = PCB(20, 30, 15, 45, 5)
        self.pcb4 = PCB(46, 49, 5, 6, 1)

        self.scheduler = Scheduler()

    def test_scheduler_with_fifo(self):
        self.scheduler.set_as_fifo()
        self.scheduler.add_pcb(self.pcb1)
        self.scheduler.add_pcb(self.pcb2)
        self.scheduler.add_pcb(self.pcb3)
        self.scheduler.add_pcb(self.pcb4)
        result = self.scheduler.get_pcb()
        self.assertEqual(self.pcb1, result)
コード例 #52
0
 def __init__(self,cycle_length,scheduler_length):
     threading.Thread.__init__(self)
     self.folder = BotConfig.KeeperFolder
     self.folderpath = ""
     self.configfile = BotConfig.KeeperConfigFile
     self.cycle_length=cycle_length
     if self.folder:
         try:
             os.mkdir(self.folder)
         except OSError:
             # the keeper folder may already exist
             pass
         self.folderpath = self.folder + "/"
     self.configfile = self.folderpath+self.configfile
     self.chans = BotConfig.Chans
     self.threads = {}
     self.threadcnt = {}
     self.threadfiles = {}
     self.threadstates = {}
     self.threadLock = threading.Lock()
     self.scheduler = Scheduler(scheduler_length)
     self.scheduler.start()
     self.Restore()
コード例 #53
0
ファイル: TestFile.py プロジェクト: nightwolf55la/CS206
	def test5(self):
		self.initScheduler()
		stu = Student("patrick.txt")
		maj = Major("CS.req")
		
		sched = Scheduler()
		sched.populateCourseData()
		Scheduler.student = stu
		Scheduler.major = maj
		
		taken = [Scheduler.getCourse(data) for data in stu.classesTaken]
		
		try:
			sched.syncMajorWithStudent() ## this is being tested
		except:
			assert False, "Failed test 6: exception"
		
		for req in Scheduler.major.getRequirements():
			for course in req.getCourses():
				if course in taken:
					print course.name, "not removed from major"
					assert False, "Failed test 6: sync error"
コード例 #55
0
ファイル: MessageHandler.py プロジェクト: JeffBorwey/HypeBot
class MessageHandler:

    def __init__(self, bot):
        self.math_parser = NumericStringParser()
        self.pprinter = pprint.PrettyPrinter(indent=4)
        self.bot = bot
        self.enable_bot = False
        self.last_msg = long(str(time.time()).split('.')[0])
        self.scheduler = Scheduler()
        self.scheduler.start()

        self.command_dict = dict()
        self.help_dict = dict()
        self.command_admin_permission = dict()
        self.command_enable_permission = dict()

        # add commands as needed
        self.register_command('enable', self.enable_bot_cmd, admin_only=True, enable_independent=True)
        self.register_command('disable', self.disable_bot_cmd, admin_only=True)
        self.register_command('join', self.join_cmd, admin_only=True)
        self.register_command('help', self.help_cmd, help='Display this message')

        self.register_command('math', MathHandler.MathHandler(self.bot, self.math_parser).handle,
                              help='Executes basic mathematics statements')

        google_engine_id = self.bot.config.get('Authentication', 'google_search_engine_id')
        google_auth_token = self.bot.config.get('Authentication', 'google_search_api_token')
        self.register_command('image', ImageSearch.ImageSearch(self.bot, google_auth_token, google_engine_id).handle,
                              help='Finds and displays the requested image from the internet. \n'
                                   'You can specify filetype:gif for animated images.\n'
                                   'Limited to 1 query per second.')
        self.register_command('gif', GifSearch.GifSearch(self.bot, google_auth_token, google_engine_id).handle,
                              help='Finds and displays the requested gif image from the internet. \n'
                              'Limited to 1 query per second.')
        self.register_command('mtg', MagicHandler.MagicTheGatheringHandler(self.bot).handle,
                              help='Finds and displays the requested Magic: The Gathering card.')
        self.register_command('wiki', WikipediaHandler.WikipediaHandler(self.bot).handle,
                              help='Finds and displays the requested wikipedia article, if it exists.')
        self.register_command('netrunner', NetrunnerHandler.NetrunnerHandler(self.bot).handle,
                              help='Finds and displays the requested Android: Netrunner card.')
        self.register_command('remind', self.remindme_cmd,
                              help='Reminds the user after the requested time period.')
        self.register_command('similarartist', SimilarArtist.SimilarArtist(self.bot).handle,
                              help='Displays a similar artist to the listed one.')
        self.register_command('lenny', LennyFaceHandler.LennyFaceHandler(self.bot).handle,
                              help='Finds and displays a random Lenny Face.')
        self.register_command('roll', RollHandler.RollHandler(self.bot).handle,
                              help='Rolls Y X-sided dice with the phrasing !roll YdX')
        self.register_command('translate', TranslateHandler.TranslateHandler(self.bot).handle,
                              help='Translates a phrase from one language to another. \nUse   '
                                     ' phrase|from_language|to_language \n'
                                     'OR phrase|to_language to translate to another language and trust in language auto-detection\n'
                                     'OR just phrase if you want to translate to English and still trust auto-detection. \n')
        wolfram_alpha_api_token = self.bot.config.get('Authentication', 'wolfram_alpha_api_token')
        self.wolfram_alpha_client = WolframAlphaClient(wolfram_alpha_api_token)
        self.register_command('wolfram',
                              WolframBasicQuery.WolframAlphaBasicQueryHandler(self.bot,
                                                                              self.wolfram_alpha_client).handle,
                              help='Queries WolframAlpha!')
        imgur_id = self.bot.config.get('Authentication', 'imgur_client_id')
        imgur_secret = self.bot.config.get('Authentication', 'imgur_client_secret')
        self.imgur_client = ImgurClient(imgur_id, imgur_secret)
        self.register_command('dankify',
                              Dankify.Dankify(self.bot, self.imgur_client).handle,
                              help='Can be used to produce a dankified image! \n'
                                   'Use: !dankify [url]')

        print('Bot started')

    def register_command(self, command_string, message_handler, help=None,
                         admin_only=False, enable_independent=False):
        self.command_dict[command_string] = message_handler

        if not admin_only:
            self.help_dict[command_string] = help

        if admin_only:
            self.command_admin_permission[command_string] = admin_only
        else:
            self.command_admin_permission[command_string] = False

        if enable_independent:
            self.command_enable_permission[command_string] = enable_independent
        else:
            self.command_enable_permission[command_string] = False

    def handle(self, msg):
        time_stamp_str = msg.xml.attrib['ts'].split('.')[0]
        time_stamp = long(time_stamp_str)

        if time_stamp < self.last_msg:
            return None
        elif msg['type'] == 'groupchat':
            # self.last_msg = time_stamp
            message_body = msg['body']
            from_name_full = msg['mucnick']
            split_str = message_body.split(' ')

            if len(split_str[0]) <= len(COMMAND_CHAR) or not split_str[0].startswith(COMMAND_CHAR):
                return None

            command = split_str[0].replace(COMMAND_CHAR, "", 1)

            # a plain dict lookup would raise KeyError for unknown commands
            handler = self.command_dict.get(command)
            if handler is None:
                return None
            admin_permission = self.command_admin_permission[command]
            enable_independent_permission = self.command_enable_permission[command]

            if (admin_permission and not from_name_full == self.bot.user_nickname) or \
                    (not enable_independent_permission and not self.enable_bot):
                return None

            reply_msg = handler(split_str, from_name_full, msg)

            print("command %s by %s" % (command, from_name_full))

            if reply_msg is not None:
                self.bot.reply_room(msg, reply_msg)

    def enable_bot_cmd(self, message, from_name_full, msg_obj):
        self.enable_bot = True
        return "Functions Enabled"

    def disable_bot_cmd(self, message, from_name_full, msg_obj):
        self.enable_bot = False
        return "Functions Disabled"

    def join_cmd(self, message, from_name_full, msg_obj):
        room_name = ' '.join(message[1:])
        if self.bot.join_room_by_name(room_name):
            self.bot.reply_room(msg_obj, "Joining room '%s'" % room_name)
        else:
            self.bot.reply_room(msg_obj, "Could not find room")

    def help_cmd(self, message, from_name_full, msg_obj):
        returned_message = ""
        for help_message in self.help_dict.items():
            returned_message = returned_message + help_message[0] + ' : ' + help_message[1] + '\n'
        return returned_message

    def remindme_cmd(self, message, from_name_full, msg_obj):
        remind_date_text = ' '.join(message[1:])
        self.scheduler.schedule_job(remind_date_text,
                                    RemindMeHandler.RemindMeHandler(self.bot, from_name_full, msg_obj).job)
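register_command() and handle() above form the bot's extension point: a handler receives the whitespace-split message, the sender's nickname and the raw message object, and any string it returns is posted back to the room. A minimal sketch of an extra command following that pattern ('ping' and ping_cmd are illustrative names, not part of the project):

    def ping_cmd(self, message, from_name_full, msg_obj):
        # message is the split body, e.g. ['!ping']; a returned string is sent
        # back to the room by handle() via self.bot.reply_room()
        return "pong"

    # registered in __init__ alongside the other commands:
    #     self.register_command('ping', self.ping_cmd, help='Replies with pong.')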
コード例 #56
0
ファイル: NMap.py プロジェクト: siavooshpayandehazad/SoCDep2
def n_map(tg, ag, noc_rg, critical_rg, non_critical_rg, shm, logging):
    """
    Performs NMap Mapping algorithm
    :param tg: Task Graph
    :param ag: Architecture Graph
    :param noc_rg: NoC Routing Graph
    :param critical_rg: NoC Routing Graph for Critical Region
    :param non_critical_rg: NoC Routing Graph for Non-Critical Region
    :param shm: System Health Map
    :param logging: logging File
    :return: TG and AG
    """
    print ("===========================================")
    print ("STARTING N-MAP MAPPING...\n")

    if len(tg.nodes()) > len(ag.nodes()):
        raise ValueError("Number of tasks should be smaller or equal to number of PEs")

    mapped_tasks = []
    unmapped_tasks = copy.deepcopy(tg.nodes())
    allocated_nodes = []
    unallocated_nodes = copy.deepcopy(ag.nodes())

    # remove all broken nodes from unallocated_nodes list
    for node in list(unallocated_nodes):  # iterate over a copy so removal is safe
        if not shm.node[node]['NodeHealth']:
            unallocated_nodes.remove(node)
            print ("REMOVED BROKEN NODE "+str(node)+" FROM UN-ALLOCATED NODES")

    print ("------------------")
    print ("STEP 1:")
    # step 1: find the task with highest weighted communication volume
    tasks_com_dict = TG_Functions.tasks_communication_weight(tg)
    sorted_tasks_com = sorted(tasks_com_dict, key=tasks_com_dict.get, reverse=True)
    print ("\t SORTED TASKS BY COMMUNICATION WEIGHT:\n"+"\t "+str(sorted_tasks_com))
    print ("\t -------------")
    chosen_task = sorted_tasks_com[0]
    print ("\t CHOSEN TASK: "+str(chosen_task))
    mapped_tasks.append(chosen_task)
    print ("\t ADDED TASK "+str(chosen_task)+"TO MAPPED TASKS LIST")
    unmapped_tasks.remove(chosen_task)
    print ("\t REMOVED TASK "+str(chosen_task)+"FROM UN-MAPPED TASKS LIST")

    print ("------------------")
    print ("STEP 2:")
    node_neighbors_dict = AG_Functions.node_neighbors(ag, shm)
    sorted_node_neighbors = sorted(node_neighbors_dict, key=node_neighbors_dict.get, reverse=True)
    max_neighbors_node = AG_Functions.max_node_neighbors(node_neighbors_dict, sorted_node_neighbors)
    print ("\t SORTED NODES BY NUMBER OF NEIGHBOURS:\n"+"\t "+str(sorted_node_neighbors))
    print ("\t -------------")
    print ("\t NODES WITH MAX NEIGHBOURS:\t"+str(max_neighbors_node))
    chosen_node = random.choice(max_neighbors_node)

    print ("\t CHOSEN NODE: "+str(chosen_node))
    allocated_nodes.append(chosen_node)
    print ("\t ADDED NODE "+str(chosen_node)+" TO ALLOCATED NODES LIST")
    unallocated_nodes.remove(chosen_node)
    print ("\t REMOVED NODE "+str(chosen_node)+" FROM UN-ALLOCATED NODES LIST")
    # Map Chosen Task on Chosen Node...
    if Mapping_Functions.map_task_to_node(tg, ag, shm, noc_rg, critical_rg,
                                          non_critical_rg, chosen_task, chosen_node, logging):
        print ("\t \033[32m* NOTE::\033[0mTASK "+str(chosen_task)+" MAPPED ON NODE "+str(chosen_node))
    else:
        raise ValueError("Mapping task on node failed...")

    print ("------------------")
    print ("STEP 3:")
    while len(unmapped_tasks) > 0:
        print ("\033[33m==>\033[0m  UN-MAPPED TASKS #: "+str(len(unmapped_tasks)))
        print ("\t -------------")
        print ("\t STEP 3.1:")
        # find the unmapped task which communicates most with mapped_tasks
        max_com = 0
        unmapped_tasks_com = {}
        tasks_with_max_com_to_mapped = []
        for Task in unmapped_tasks:
            task_weight = 0
            for mapped_task in mapped_tasks:
                if (Task, mapped_task) in tg.edges():
                    task_weight += tg.edge[Task][mapped_task]["ComWeight"]
                if (mapped_task, Task) in tg.edges():
                    task_weight += tg.edge[mapped_task][Task]["ComWeight"]
            unmapped_tasks_com[Task] = task_weight
            if max_com < task_weight:
                max_com = task_weight
                tasks_with_max_com_to_mapped = [Task]
            elif max_com == task_weight:
                tasks_with_max_com_to_mapped.append(Task)
        print ("\t MAX COMMUNICATION WITH THE MAPPED TASKS: "+str(max_com))
        print ("\t TASK(S) WITH MAX COMMUNICATION TO MAPPED TASKS: "+str(tasks_with_max_com_to_mapped))
        if len(tasks_with_max_com_to_mapped) > 1:
            # multiple tasks with same comm to mapped
            # Find the one that communicates most with un-mapped tasks...
            candid_task_with_max_com_to_unmapped = []
            max_com = 0
            for CandidateTask in tasks_with_max_com_to_mapped:
                task_weight = 0
                # accumulate communication weight between this candidate and the still un-mapped tasks
                for unmapped_task in unmapped_tasks:
                    if (CandidateTask, unmapped_task) in tg.edges():
                        task_weight += tg.edge[CandidateTask][unmapped_task]["ComWeight"]
                    if (unmapped_task, CandidateTask) in tg.edges():
                        task_weight += tg.edge[unmapped_task][CandidateTask]["ComWeight"]
                if task_weight > max_com:
                    max_com = task_weight
                    candid_task_with_max_com_to_unmapped = [CandidateTask]
                elif task_weight == max_com:
                    candid_task_with_max_com_to_unmapped.append(CandidateTask)
            print ("\t CANDIDATE TASK(S) THAT COMMUNICATE MOST WITH UN_MAPPED: " +
                   str(candid_task_with_max_com_to_unmapped))
            if len(candid_task_with_max_com_to_unmapped) > 1:
                # if multiple tasks also have the same com to un-mapped tasks,
                # choose randomly
                chosen_task = random.choice(candid_task_with_max_com_to_unmapped)
            else:
                chosen_task = candid_task_with_max_com_to_unmapped[0]
        else:
            chosen_task = tasks_with_max_com_to_mapped[0]
        print ("\t CHOSEN TASK: "+str(chosen_task))

        # Find the unallocated tile with lowest communication cost to/from the allocated_tiles_set.
        print ("\t -------------")
        print ("\t STEP 3.2:")
        min_cost = float("inf")
        node_candidates = []
        for unallocated_node in unallocated_nodes:
            cost = 0
            reachable = True
            for mapped_task in mapped_tasks:
                com_weight = 0
                if (chosen_task, mapped_task) in tg.edges():
                    # print ("TASK CONNECTED TO MAPPED TASK:", mapped_task)
                    com_weight += tg.edge[chosen_task][mapped_task]["ComWeight"]
                    destination_node = tg.node[mapped_task]['task'].node
                    # here we check if this node is even reachable from the chosen node?
                    if Calculate_Reachability.is_destination_reachable_from_source(noc_rg, unallocated_node,
                                                                                   destination_node):
                        manhatan_distance = AG_Functions.manhattan_distance(unallocated_node, destination_node)
                        cost += manhatan_distance * com_weight
                    else:
                        reachable = False
                elif (mapped_task, chosen_task) in tg.edges():
                    # print ("TASK CONNECTED TO MAPPED TASK:", mapped_task)
                    com_weight += tg.edge[mapped_task][chosen_task]["ComWeight"]
                    destination_node = tg.node[mapped_task]['task'].node
                    # here we check if this node is even reachable from the chosen node?
                    if Calculate_Reachability.is_destination_reachable_from_source(noc_rg, destination_node,
                                                                                   unallocated_node):
                        manhatan_distance = AG_Functions.manhattan_distance(unallocated_node, destination_node)
                        cost += manhatan_distance * com_weight
                    else:
                        reachable = False
            if reachable:
                if cost < min_cost:
                    node_candidates = [unallocated_node]
                    min_cost = cost
                elif cost == min_cost:
                    node_candidates.append(unallocated_node)
            else:
                print ("\t \033[33m* NOTE::\033[0m NODE "+str(unallocated_node)+" CAN NOT REACH...")
                pass
        print ("\t CANDIDATE NODES: "+str(node_candidates)+" MIN COST: "+str(min_cost))

        if len(node_candidates) == 0:
            raise ValueError("COULD NOT FIND A REACHABLE CANDIDATE NODE...")
        elif len(node_candidates) > 1:
            chosen_node = random.choice(node_candidates)
        elif len(node_candidates) == 1:
            chosen_node = node_candidates[0]
        else:
            # this means that the chosen task is not connected to any other task... so its cost is infinity
            chosen_node = random.choice(unallocated_nodes)

        mapped_tasks.append(chosen_task)
        print ("\t ADDED TASK "+str(chosen_task)+" TO MAPPED TASKS LIST")
        unmapped_tasks.remove(chosen_task)
        print ("\t REMOVED TASK "+str(chosen_task)+" FROM UN-MAPPED TASKS LIST")

        allocated_nodes.append(chosen_node)
        print ("\t ADDED NODE "+str(chosen_node)+" TO ALLOCATED NODES LIST")
        unallocated_nodes.remove(chosen_node)
        print ("\t REMOVED NODE "+str(chosen_node)+" FROM UN-ALLOCATED NODES LIST")

        if Mapping_Functions.map_task_to_node(tg, ag, shm, noc_rg, critical_rg,
                                              non_critical_rg, chosen_task, chosen_node, logging):
            print ("\t \033[32m* NOTE::\033[0mTASK "+str(chosen_task)+" MAPPED ON NODE "+str(chosen_node))
        else:
            raise ValueError("Mapping task on node failed...")

    # Added by Behrad (Still under development)
    # Swapping phase
    print "-----------------------"
    print "PHASE ONE IS DONE... STARTING SWAP PROCESS..."
    for node_id_1 in range(0, len(ag.nodes())-1):
        for node_id_2 in range(node_id_1+1, len(ag.nodes())-1):
            pass
            # Save current mapping in an array
            # Also save the mapping's comm_cost in a variable
            comm_cost = calculate_com_cost(tg)

            # Swap (node_id_1 , node_id_2)
            swap_nodes(tg, ag, shm, noc_rg, critical_rg, non_critical_rg, node_id_1, node_id_2, logging)
            # Check and calculate communication cost for all communication flows in the task graph
            #   (which is equal to the total number of edges in the application graph),
            #   starting from the communication flow with the largest communication volume first
            comm_cost_new = calculate_com_cost(tg)
            # If comm_cost of current mapping is the same or bigger than the previous mapping, discard mapping
            #   Revert back to previous mapping with better comm_cost
            # Else
            #   Save new mapping as better mapping with less comm_cost
            if comm_cost_new < comm_cost:
                print "\033[32m* NOTE::\033[0m BETTER SOLUTION FOUND WITH COST:", comm_cost_new
            else:
                pass
                # print "Reverting to old solution"
                swap_nodes(tg, ag, shm, noc_rg, critical_rg, non_critical_rg,
                           node_id_2, node_id_1, logging)
            # Reset the comm_cost after each swapping

    # End of Swapping phase
    print "SWAP PROCESS FINISHED..."
    Scheduler.schedule_all(tg, ag, shm, True, logging)
    return tg, ag
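The swapping phase above scores each mapping with calculate_com_cost(tg), which is defined elsewhere in the project and not shown in this excerpt. A minimal sketch of such a cost function, assuming (as the code above does) that every task records its assigned node in tg.node[task]['task'].node, that edges carry a "ComWeight" attribute, and reusing the AG_Functions.manhattan_distance helper; the project's real implementation may differ:

def calculate_com_cost_sketch(tg):
    # hypothetical stand-in for calculate_com_cost(): sum, over all task-graph
    # edges, the communication weight times the Manhattan distance between the
    # nodes the two endpoint tasks are currently mapped on
    cost = 0
    for source_task, destination_task in tg.edges():
        com_weight = tg.edge[source_task][destination_task]["ComWeight"]
        source_node = tg.node[source_task]['task'].node
        destination_node = tg.node[destination_task]['task'].node
        cost += com_weight * AG_Functions.manhattan_distance(source_node, destination_node)
    return cost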
コード例 #57
0
 def __init__(self):
     Scheduler.__init__(self,"SGE")
     self.datasetPath   = None
     self.selectNoInput = None
     self.OSBsize = None
     return