Code example #1
def dataReceivingLoop(sourceString):
  # print 'Into Receiving Process Loop'
  decodeData = dataFormate.compileFromJson(sourceString)
  print decodeData.conditions
  # load the file into a memory block according to its file type
  if decodeData.filetype == dataFormate.TYPE_TEXTFILE:
    targetFile = FileReader.loadTextFile(decodeData.filepath)
  elif decodeData.filetype == dataFormate.TYPE_RCFILE:
    targetFile = FileReader.loadRCFile(decodeData.filepath)
  else:
    print "Unsupported file type"
    return
  # validate the file and the condition list, in case something unexpected happens
  if not validate(decodeData.conditions, targetFile):
    print "Error Occurred"
    return

  # read the predicates
  # prepare a list of predicates; each predicate has a memory block and an atom filter
  predicatesList = []

  for condition in decodeData.conditions:
    predicate = dataFormate.Condition(condition)
    # print("the comparison operator is %s" % predicate.symbol)
    predicatesList.append(predicate)

  
  # pass the full predicate list rather than the last loop variable
  Jobs = FileReader.loadTEXTTargetColumns(targetFile.fileOperator, predicatesList)

  JobManager.submitJobs(Jobs)
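
A minimal way to exercise this loop, assuming dataFormate.compileFromJson parses a JSON string whose fields mirror the attributes read above (filetype, filepath, conditions); all payload values here are illustrative, not the real schema:

import json

payload = json.dumps({
    "filetype": "TYPE_TEXTFILE",   # placeholder; the real decoded value is unknown
    "filepath": "/data/sample.txt",
    "conditions": ["age > 30"],
})
dataReceivingLoop(payload)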
Code example #2
def test_JobManager_get_jobs():
    jobManager = JobManager()
    id1 = jobManager.create_job("ping 127.0.0.1")
    id2 = jobManager.create_job("ping 127.0.0.1")
    id3 = jobManager.create_job("ping 127.0.0.1")
    id4 = jobManager.create_job("ping 127.0.0.1")
    idarr = jobManager.get_jobs()
    assert (len(idarr) == 4)
Code example #3
def onPluginStart():
    global p1_spX, p1_spY
    TileManager.InGameInit()
    BuildingInfo.init()
    JobManager.init()

    P1_startPositionX = EPD(0x58D720)
    P1_startPositionY = EPD(0x58D722)
    p1_spX = f_wread_epd(P1_startPositionX, 0)
    p1_spY = f_wread_epd(P1_startPositionY, 2)
    f_simpleprint("StartPosition : ", p1_spX, p1_spY)
Code example #4
def beforeTriggerExec():
    global curTestIndex, p1_spX, p1_spY
    if EUDIf()(curTestIndex + testSize > 128 * 125):  #TileManager.tileNum):
        #f_simpleprint('reset')
        curTestIndex << 128 * 0
    EUDEndIf()
    TileManager.VisualizingTileDB(curTestIndex, curTestIndex + testSize)
    curTestIndex += testSize

    # if EUDIf()(f_bread(0x596A18 + 32) == 1):
    #     f_simpleprint('Attempt to create a job')
    #     JobManager.CreateJob(EncodeUnit("Terran Supply Depot"), p1_spX, p1_spY)
    # EUDEndIf()
    #JobManager.CreateJob(EncodeUnit("Terran Refinery"), 3808,96)#p1_spX, p1_spY)
    #JobManager.CreateJob(EncodeUnit("Terran Barracks"), 3808,96)#p1_spX, p1_spY)
    #JobManager.CreateJob(EncodeUnit("Terran Missile Turret"), p1_spX, p1_spY)
    #DoActions(MoveLocation(EncodeLocation("Anywhere"), EncodeUnit("Tom Kazansky"), P1, EncodeLocation("Location 1")))
    DoActions(
        MoveLocation(EncodeLocation("Location 1"), EncodeUnit("Tom Kazansky"),
                     P1, EncodeLocation("Anywhere")))
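    # Presumably 0x58DC60 is the start of the location table, and the +5-dword
    # offset lands on "Location 1"'s left/right and top/bottom edges; the two
    # averages below then give the location's center point.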
    loc1PosX = (f_dwread_epd(EPD(0x58DC68) + 5) +
                f_dwread_epd(EPD(0x58DC60) + 5)) // 2
    loc1PosY = (f_dwread_epd(EPD(0x58DC6C) + 5) +
                f_dwread_epd(EPD(0x58DC64) + 5)) // 2
    #f_simpleprint(loc1PosX,loc1PosY)
    if EUDIf()(Command(P1, AtLeast, 1, EncodeUnit("Terran Marine"))):
        DoActions(RemoveUnit(EncodeUnit("Terran Marine"), P1))
        f_simpleprint("Let's build a Supply Depot.")
        JobManager.CreateJob(EncodeUnit("Terran Supply Depot"), loc1PosX,
                             loc1PosY)
    EUDEndIf()
    if EUDIf()(Command(P1, AtLeast, 1, EncodeUnit("Terran Firebat"))):
        DoActions(RemoveUnit(EncodeUnit("Terran Firebat"), P1))
        f_simpleprint("Let's build a Command Center.")
        JobManager.CreateJob(EncodeUnit("Terran Command Center"), loc1PosX,
                             loc1PosY)
    EUDEndIf()
    if EUDIf()(Command(P1, AtLeast, 1, EncodeUnit("Terran Medic"))):
        DoActions(RemoveUnit(EncodeUnit("Terran Medic"), P1))
        f_simpleprint("Let's build a Missile Turret.")
        JobManager.CreateJob(EncodeUnit("Terran Missile Turret"), loc1PosX,
                             loc1PosY)
    EUDEndIf()
    if EUDIf()(Command(P1, AtLeast, 1, EncodeUnit("Terran Ghost"))):
        DoActions(RemoveUnit(EncodeUnit("Terran Ghost"), P1))
        f_simpleprint("Let's build a Bunker.")
        JobManager.CreateJob(EncodeUnit("Terran Bunker"), loc1PosX, loc1PosY)
    EUDEndIf()
    unitLoop.main()
    JobManager.Update()
Code example #5
File: WebServices.py Project: rolfschr/testerman
def getJobDetails(jobId):
	"""
	Gets a specific job's details.

	@since: 1.8

	@type  jobId: integer
	@param jobId: the job ID identifying the job whose status should be retrieved.
	
	@throws Exception: in case of an internal error.

	@rtype: dict
	       {'id': integer, 'parent-id': integer, 'name': string,
	        'state': string in ['waiting', 'running', 'stopped', 'cancelled', 'killed', 'paused'],
	        'running-time': float or None, 'result': integer or None, 'username': string,
	        'start-time': float or None, 'stop-time': float or None, 'scheduled-at': float,
	        'type': string in ['ats', 'campaign'],
	        'path': string (docroot-based path for jobs whose source is in docroot) or None (client-based source),
	        'te-filename': string or None,
	        'te-input-parameters': dict or None,
	        'te-command-line': string or None,
	        'source': base64-encoded string
	       }
	@returns: a dict of info for the given job, or None if not found.

	@throws Exception: when the job was not found, or when the job file was removed.
	"""
	getLogger().info(">> getJobDetails(%s)" % str(jobId))
	res = []
	try:
		res = JobManager.instance().getJobDetails(jobId)
	except Exception, e:
		e =  Exception("Unable to complete getJobDetails operation: %s\n%s" % (str(e), Tools.getBacktrace()))
		getLogger().info("<< getJobDetails(...): Fault:\n%s" % str(e))
		raise(e)
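
These WebServices functions are exposed over XML-RPC (see the XmlRpcServerThread in code example #37), so a client-side call might look like the sketch below; the server address and job ID are assumptions:

import xmlrpclib  # Python 2, matching the server code

server = xmlrpclib.ServerProxy("http://localhost:8080")  # assumed address
details = server.getJobDetails(42)
if details:
    print details['state'], details['scheduled-at']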
Code example #6
File: WebServices.py Project: rolfschr/testerman
def getJobLog(jobId, useCompression = True):
	"""
	Gets the current log for an existing job.
	
	@since: 1.0

	@type  jobId: integer
	@param jobId: the job ID identifying the job whose log should be retrieved
	@type  useCompression: bool
	@param useCompression: if set to True, compress the log using zlib before encoding the response in base64
	
	@rtype: string
	@returns: the job's log in utf-8 encoded XML,
	          optionally zlib-compressed and base64 encoded if useCompression is set to True
	"""
	getLogger().info(">> getJobLog(%d, %s)" % (jobId, str(useCompression)))
	res = None
	try:
		log = JobManager.instance().getJobLog(jobId)
		if log is not None:
			if useCompression:
				res = base64.encodestring(zlib.compress(log))
			else:
				res = base64.encodestring(log)
	except Exception, e:
		e =  Exception("Unable to complete getJobLog operation: %s\n%s" % (str(e), Tools.getBacktrace()))
		getLogger().info("<< getJobLog(...): Fault:\n%s" % str(e))
		raise(e)
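
Assuming the (truncated) function returns res as its counterpart in code example #14 does, a client would reverse the encoding like this to recover the XML log:

import base64
import zlib

encoded = getJobLog(42, useCompression=True)  # job ID is an assumption
if encoded is not None:
    xml_log = zlib.decompress(base64.decodestring(encoded))  # utf-8 XML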
Code example #7
def randomRetries(inputFile, m, k):

    bestM = 1
    bestS = 100000000000
    bestR = 1
    for i in range(5):
        for m in range(m, m + 1):
            startTime = time.time()
            jobs = JobManager.JobManager(k, inputFile, m)
            machines = MachineBoss.MachineBoss(m)
            RandomSearch.RandomSearch(machines, jobs)

            makeSpan = machines.maxMachine().makeSpan
            OPT = max(jobs.MAXJOB, jobs.sumJobTime / float(m))
            if makeSpan < bestS:
                bestS = makeSpan
                bestM = m
                bestR = OPT
            exTime = time.time() - startTime
            #print "Max Machine Run time: "+ str(makeSpan) + " OPT for " + str(m) + " machines is " + str(ratio)
            #print "ratio: " + str(makeSpan/ratio)

        print "Random Search: \t\t " + str(bestS) + "|| Ratio: " + str(
            bestS / OPT) + " run time: " + str(exTime)
Code example #8
File: WebServices.py Project: delian/testerman
def purgeJobQueue(older_than):
    """
	Purges jobs in the queue that:
	- are completed (any status)
	- and whose completion time is strictly older than the provided older_than timestamp (UTC)

	@since: 1.5

	@type  older_than: float (timestamp)
	@param older_than: the epoch timestamp of the older job to keep

	@throws Exception in case of an error

	@rtype: int
	@returns: the number of purged jobs
	"""
    getLogger().info(">> purgeJobQueue(%s)" % older_than)
    res = 0
    try:
        res = JobManager.instance().purgeJobs(older_than)
    except Exception as e:
        e = Exception("Unable to complete purgeJobs operation: %s\n%s" %
                      (str(e), Tools.getBacktrace()))
        getLogger().info("<< purgeJobQueue(...): Fault:\n%s" % str(e))
        raise (e)

    getLogger().info("<< purgeJobQueue: %s job(s) purged" % res)
    return res
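
A usage sketch: purge every completed job that finished more than 24 hours ago, taking the current epoch time as the UTC reference.

import time

purged = purgeJobQueue(time.time() - 24 * 3600)
print "%d job(s) purged" % purged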
Code example #9
File: WebServices.py Project: rolfschr/testerman
def getJobInfo(jobId = None):
	"""
	Gets a job or all jobs information.

	@since: 1.0

	@type  jobId: integer, or None
	@param jobId: the job ID identifying the job whose status should be retrieved, or None for all jobs.
	
	@throws Exception: in case of an internal error.

	@rtype: a list of dict
	       {'id': integer, 'parent-id': integer, 'name': string,
	        'state': string in ['waiting', 'running', 'stopped', 'cancelled', 'killed', 'paused'],
	        'running-time': float or None, 'result': integer or None, 'username': string,
	        'start-time': float or None, 'stop-time': float or None, 'scheduled-at': float,
	        'type': string in ['ats', 'campaign'],
	        'path': string (docroot-based path for jobs whose source is in docroot) or None (client-based source)
	       }
	@returns: a list of info for the given job, or for all jobs in the queue if jobId is None.

	@throws Exception: when the job was not found, or when the job file was removed.
	"""
	getLogger().info(">> getJobInfo(%s)" % str(jobId))
	res = []
	try:
		res = JobManager.instance().getJobInfo(jobId)
	except Exception, e:
		e =  Exception("Unable to complete getJobInfo operation: %s\n%s" % (str(e), Tools.getBacktrace()))
		getLogger().info("<< getJobInfo(...): Fault:\n%s" % str(e))
		raise(e)
Code example #10
File: WebServices.py Project: delian/testerman
def sendSignal(jobId, signal):
    """
	Sends a signal to the job id'd by jobId.
	
	@since: 1.0

	@type  jobId: integer
	@param jobId: the job Id
	@type  signal: string
	@param signal: the signal to send to the job 
	
	@throws Exception: in case of an internal error.

	@rtype: bool
	@returns: True if successfully sent, or False if the job was not found.
	"""
    getLogger().info(">> sendSignal(%d, %s)" % (jobId, signal))
    ret = False
    try:
        ret = JobManager.instance().sendSignal(jobId, signal)
    except Exception as e:
        e = Exception("Unable to perform operation: %s\n%s" %
                      (str(e), Tools.getBacktrace()))
        getLogger().info("<< sendSignal(...): Fault:\n%s" % str(e))
        raise (e)
    getLogger().info("<< sendSignal: %s" % str(ret))
    return ret
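
A call sketch for the function above; the snippet does not show the accepted signal vocabulary, so both arguments are assumed example values:

ok = sendSignal(42, "kill")  # job ID and signal name are assumptions
if not ok:
    print "job 42 not found"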
Code example #11
File: WebServices.py Project: delian/testerman
def getJobLogFilename(jobId):
    """
	Gets an existing job's log filename.
	
	@since: 1.0

	@type  jobId: integer
	@param jobId: the job ID identifying the job whose log filename should be retrieved
	
	@rtype: string, or None
	@returns: the log filename relative to the docroot,
	          or None if the job was not found
	"""
    getLogger().info(">> getJobLogFilename(%d)" % jobId)
    res = None
    try:
        res = JobManager.instance().getJobLogFilename(jobId)
    except Exception as e:
        e = Exception(
            "Unable to complete getJobLogFilename operation: %s\n%s" %
            (str(e), Tools.getBacktrace()))
        getLogger().info("<< getJobLogFilename(...): Fault:\n%s" % str(e))
        raise (e)
    getLogger().info("<< getJobLogFilename: %s" % str(res))
    return res
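
Since the returned filename is docroot-relative, a caller would join it with the server's docroot before opening it; in this sketch the docroot path is an assumption:

import os

filename = getJobLogFilename(42)  # job ID is an assumption
if filename is not None:
    log_path = os.path.join("/var/testerman/docroot", filename.lstrip("/"))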
Code example #12
File: WebServices.py Project: delian/testerman
def rescheduleJob(jobId, at):
    """
	Reschedules a job to start at <at>.

	@since: 1.2

	@type  jobId: integer
	@param jobId: the jobId identifying the job that needs rescheduling
	@type  at: float
	@param at: the timestamp of the new scheduled start
	
	@throws Exception: in case of an internal error.

	@rtype: bool
	@returns: True if the rescheduling was OK, False otherwise (job already started)
	"""
    getLogger().info(">> rescheduleJob(%s)" % str(jobId))
    res = False
    try:
        res = JobManager.instance().rescheduleJob(jobId, at)
    except Exception as e:
        e = Exception("Unable to complete rescheduleJob operation: %s\n%s" %
                      (str(e), Tools.getBacktrace()))
        getLogger().info("<< rescheduleJob(...): Fault:\n%s" % str(e))
        raise (e)

    getLogger().info("<< rescheduleJob: %s" % str(res))
    return res
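
For example, pushing a not-yet-started job back by ten minutes (job ID assumed):

import time

if rescheduleJob(42, time.time() + 600):
    print "job 42 rescheduled"
else:
    print "job 42 had already started"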
Code example #13
File: JobOrganizer.py Project: l1calo/TrigT1CaloCAF
	def __init__(self, sConfigFile):

		self.sConfigFile = sConfigFile

		self.dbConnection = ""

		self.logger = None
		self.initLogging("JobOrganizer")
		self.logger.info("========================================")
		self.logger.info("JobOrganizer in init")


		# check if initfile is valid
		if os.path.exists(self.sConfigFile):
			self.logger.info("init file " + self.sConfigFile + " was found")
			# use splitext: str.strip(".py") removes characters, not the suffix
			self.configModule = __import__(os.path.splitext(self.sConfigFile)[0])

			#self.createRunlisteners()
			#self.initRunListeners()
			self.initDbParameters()
			self.readJobsConfiguration()

			self.jobManager = JobManager(self.sConfigFile)

			Tools.createFolder('tmp/jobConfig')

		else:
			self.logger.error("init file " + self.sConfigFile + " was not found!")
			sys.exit(1)

		self.logger.info("JobOrganizer initialized")
		self.logger.info("========================================")
Code example #14
File: WebServices.py Project: delian/testerman
def getJobLog(jobId, useCompression=True):
    """
	Gets the current log for an existing job.
	
	@since: 1.0

	@type  jobId: integer
	@param jobId: the job ID identifying the job whose log should be retrieved
	@type  useCompression: bool
	@param useCompression: if set to True, compress the log using zlib before encoding the response in base64
	
	@rtype: string
	@returns: the job's log in utf-8 encoded XML,
	          optionally zlib-compressed and base64 encoded if useCompression is set to True
	"""
    getLogger().info(">> getJobLog(%d, %s)" % (jobId, str(useCompression)))
    res = None
    try:
        log = JobManager.instance().getJobLog(jobId)
        if log is not None:
            if useCompression:
                res = base64.encodestring(zlib.compress(log))
            else:
                res = base64.encodestring(log)
    except Exception as e:
        e = Exception("Unable to complete getJobLog operation: %s\n%s" %
                      (str(e), Tools.getBacktrace()))
        getLogger().info("<< getJobLog(...): Fault:\n%s" % str(e))
        raise (e)

    getLogger().info("<< getJobLog: %d bytes returned" % len(res))
    return res
Code example #15
File: WebServices.py Project: delian/testerman
def getJobInfo(jobId=None):
    """
	Gets a job or all jobs information.

	@since: 1.0

	@type  jobId: integer, or None
	@param jobId: the job ID identifying the job whose status should be retrieved, or None for all jobs.
	
	@throws Exception: in case of an internal error.

	@rtype: a list of dict
	       {'id': integer, 'parent-id': integer, 'name': string,
	        'state': string in ['waiting', 'running', 'stopped', 'cancelled', 'killed', 'paused'],
	        'running-time': float or None, 'result': integer or None, 'username': string,
	        'start-time': float or None, 'stop-time': float or None, 'scheduled-at': float,
	        'type': string in ['ats', 'campaign'],
	        'path': string (docroot-based path for jobs whose source is in docroot) or None (client-based source)
	       }
	@returns: a list of info for the given job, or for all jobs in the queue if jobId is None.

	@throws Exception: when the job was not found, or when the job file was removed.
	"""
    getLogger().info(">> getJobInfo(%s)" % str(jobId))
    res = []
    try:
        res = JobManager.instance().getJobInfo(jobId)
    except Exception as e:
        e = Exception("Unable to complete getJobInfo operation: %s\n%s" %
                      (str(e), Tools.getBacktrace()))
        getLogger().info("<< getJobInfo(...): Fault:\n%s" % str(e))
        raise (e)

    getLogger().info("<< getJobInfo: %d job info entries returned" % len(res))
    return res
Code example #16
File: Controller.py Project: uzh/QHG
    def __init__(self, host_string, port, num_iters, job_dir, job_mask,
                 timeout):
        self.job_manager = JobManager.JobManager(job_dir, job_mask)
        self.host_list = host_string.split(':')
        self.port = port
        self.timeout = timeout
        self.comms = []
        self.threads = []
        self.num_iters = num_iters
        self.this_host = socket.gethostname()
        self.proc_list = []
Code example #17
    def delete_jobs(self):
        self.hginterface.showLoading('Deleting data(s)...')

        rows = self.hginterface.grid_joblist.get_selected_rows()

        job = JobManager.JobManager()

        for row in rows:
            jobid = self.hginterface.grid_joblist.df.iloc[[row]].index[0]
            job.deleteJob(jobid)

        self.load_joblist()

        self.hginterface.hideLoading()
Code example #18
    def ping(self):
        import JobManager
        import ServerStatus
        import socket

        try:
            if (self.opts['verbose'] == True):
                serverStatusObj = ServerStatus.ServerStatus()
                serverStatusObj.displayServerStatus(self.opts['verbose'])
            else:
                jobMgr = JobManager.JobManager()
                jobMgr.pingServer()
        except socket.timeout, msg:
            print >> sys.stderr, msg
            sys.exit(106)
Code example #19
def test_JobManager_kill_jobs():
    jobManager = JobManager()
    id1 = jobManager.create_job("ping 127.0.0.1")
    id2 = jobManager.create_job("ping 127.0.0.1")
    id3 = jobManager.create_job("ping 127.0.0.1")
    id4 = jobManager.create_job("ping 127.0.0.1")
    time.sleep(2)  # Give thread and proc a chance to start
    killed = jobManager.kill_job(id1)
    assert (killed == True)
    assert (len(jobManager.get_jobs_stopped()) == 1)
Code example #20
    def run(self, name, datafiles, goldnet_file):
        import numpy

        os.chdir(os.environ["gene_path"])

        datastore = ReadData(datafiles[0], "steadystate")
        for file in datafiles[1:]:
            datastore.combine(ReadData(file, "steadystate"))
        datastore.normalize()

        settings = {}
        settings = ReadConfig(settings)
        # TODO: CHANGE ME
        settings["global"]["working_dir"] = os.getcwd() + '/'

        # Setup job manager
        print "Starting new job manager"
        jobman = JobManager(settings)

        # Make GENIE3 jobs
        genie3 = GENIE3()
        genie3.setup(datastore, settings, name)

        print "Queuing job..."
        jobman.queueJob(genie3)

        print jobman.queue
        print "Running queue..."
        jobman.runQueue()
        jobman.waitToClear()

        print "Queue finished"
        job = jobman.finished[0]
        print job.alg.gene_list
        print job.alg.read_output(settings)
        jobnet = job.alg.network
        print "PREDICTED NETWORK:"
        print job.alg.network.network
        print jobnet.original_network

        if goldnet_file != None:
            goldnet = Network()
            goldnet.read_goldstd(goldnet_file)
            print "GOLD NETWORK:"
            print goldnet.network
            print jobnet.analyzeMotifs(goldnet).ToString()
            print jobnet.calculateAccuracy(goldnet)

        return jobnet.original_network
Code example #21
    def load_joblist(self, refresh_jobstatus = False):

        if refresh_jobstatus:
            self.hginterface.showLoading(msg='Refreshing data fetch list...')

            job = JobManager.JobManager()
            job.updateAllJobStatus()
            time.sleep(0.5) 
            self.hginterface.hideLoading()

        db = DBManager()
        joblist = db.getJobList()
        joblist = [x[:10] for x in joblist]

        self.joblist_df = self.hginterface.setJobList(joblist)
        self.hginterface.txt_joboutput.value = ''
        self.hginterface.txt_joboutout_title.value = 'Data fetch output'

        self.hginterface.grid_joblist._selected_rows = []
Code example #22
    def run(self, ts_file, name=None, delta_t=30):

        os.chdir(os.environ["gene_path"])

        print "Reading in knockout data"
        timeseries_storage = ReadData(ts_file, "timeseries")

        settings = {}
        settings = ReadConfig(settings)
        # TODO: CHANGE ME
        settings["global"]["working_dir"] = os.getcwd() + "/"

        # Setup job manager
        print "Starting new job manager"
        jobman = JobManager(settings)

        # Make Banjo jobs
        banjojob = Banjo()
        if delta_t != None:
            settings["global"]["time_series_delta_t"] = int(delta_t)
        else:
            settings["global"]["time_series_delta_t"] = 30
        if name != None:
            banjojob.setup(timeseries_storage, settings, name)
        else:
            banjojob.setup(timeseries_storage, settings)

        print "Queuing job..."
        jobman.queueJob(banjojob)

        print jobman.queue
        print "Running queue..."
        jobman.runQueue()
        jobman.waitToClear()

        print "Queue finished"
        job = jobman.finished[0]
        print job.alg.gene_list
        print job.alg.read_output(settings)
        jobnet = job.alg.network
        print "PREDICTED NETWORK:"
        # print job.alg.network.network
        # print jobnet.original_network

        return jobnet.original_network
Code example #23
def main():
    k = 3
    """
    for m in  range(2,4):

        jobs = JobManager.JobManager(k,'testFile.txt')
        machines = MachineBoss.MachineBoss(m)
        GreedyScheduler.GreedyScheduler(machines,jobs)

        makeSpan =  machines.maxMachine().makeSpan

        #print "Max Machine Run time: "+ str(makeSpan) + " OPT for " + str(m) + " machines is " + str(ratio)
        #print "ratio: " + str(makeSpan/ratio)
    LB = max(jobs.sumJobTime/m,max(jobs.jobs))
    print "LB = "+str(LB)
    print "Best: m= "+ str(m) + " Greedy makespan: " + str(makeSpan) + " ratio: " + str(makeSpan/LB)

    for m in  range(2,4):
        jobs = JobManager.JobManager(k,'testFile.txt')
        machines = MachineBoss.MachineBoss(m)
        SortedGreedyScheduler.SortedGreedyScheduler(machines,jobs)
        makeSpan =  machines.maxMachine().makeSpan

        #print "Max Machine Run time: "+ str(makeSpan) + " OPT for " + str(m) + " machines is " + str(ratio)
        #print "ratio: " + str(makeSpan/ratio)
    print "Best: m= "+ str(m) + " SortedGreedy makespan: " + str(makeSpan) + " ratio: " + str(makeSpan/LB)
    for m in machines:
        print str(m.getJobs())
    """

    for m in range(2, 8):
        jobs = JobManager.JobManager(k, 'input1.txt', m)
        machines = MachineBoss.MachineBoss(m)
        RandomScheduler.RandomScheduler(machines, jobs)
        makeSpan = machines.maxMachine().makeSpan

        #print "Max Machine Run time: "+ str(makeSpan) + " OPT for " + str(m) + " machines is " + str(ratio)
        #print "ratio: " + str(makeSpan/ratio)
        LB = max(jobs.sumJobTime / m, max(jobs.jobs))
        print "LB = " + str(LB)
        print "Best: m= " + str(m) + " Greedy makespan: " + str(
            makeSpan) + " ratio: " + str(makeSpan / LB)
Code example #24
File: WebServices.py Project: rolfschr/testerman
def persistJobQueue():
	"""	
	Persists the current job queue to the standard persistence file.
	This administrative function may be convenient when you're about
	to kill the server violently.
	
	@since: 1.5

	@throws Exception in case of an error
	
	@rtype: None
	"""
	getLogger().info(">> persistJobQueue()")
	
	try:
		res = JobManager.instance().persist()
	except Exception, e:
		e =  Exception("Unable to complete persistJobQueue operation: %s\n%s" % (str(e), Tools.getBacktrace()))
		getLogger().info("<< persistJobQueue(...): Fault:\n%s" % str(e))
		raise(e)
Code example #25
File: WebServices.py Project: rolfschr/testerman
def getJobLogFilename(jobId):
	"""
	Gets an existing job's log filename.
	
	@since: 1.0

	@type  jobId: integer
	@param jobId: the job ID identifying the job whose log filename should be retrieved
	
	@rtype: string, or None
	@returns: the log filename relative to the docroot,
	          or None if the job was not found
	"""
	getLogger().info(">> getJobLogFilename(%d)" % jobId)
	res = None
	try:
		res = JobManager.instance().getJobLogFilename(jobId)
	except Exception, e:
		e =  Exception("Unable to complete getJobLogFilename operation: %s\n%s" % (str(e), Tools.getBacktrace()))
		getLogger().info("<< getJobLogFilename(...): Fault:\n%s" % str(e))
		raise(e)
Code example #26
File: WebServices.py Project: rishie/testerman
def persistJobQueue():
    """	
	Persists the current job queue to the standard persistence file.
	This administrative function may be convenient when you're about
	to kill the server violently.
	
	@since: 1.5

	@throws Exception in case of an error
	
	@rtype: None
	"""
    getLogger().info(">> persistJobQueue()")

    try:
        res = JobManager.instance().persist()
    except Exception, e:
        e = Exception("Unable to complete persistJobQueue operation: %s\n%s" %
                      (str(e), Tools.getBacktrace()))
        getLogger().info("<< persistJobQueue(...): Fault:\n%s" % str(e))
        raise (e)
Code example #27
File: WebServices.py Project: delian/testerman
def getJobDetails(jobId):
    """
	Gets a specific job's details.

	@since: 1.8

	@type  jobId: integer
	@param jobId: the job ID identifying the job whose status should be retrieved.
	
	@throws Exception: in case of an internal error.

	@rtype: dict
	       {'id': integer, 'parent-id': integer, 'name': string,
	        'state': string in ['waiting', 'running', 'stopped', 'cancelled', 'killed', 'paused'],
	        'running-time': float or None, 'result': integer or None, 'username': string,
	        'start-time': float or None, 'stop-time': float or None, 'scheduled-at': float,
	        'type': string in ['ats', 'campaign'],
	        'path': string (docroot-based path for jobs whose source is in docroot) or None (client-based source),
	        'te-filename': string or None,
	        'te-input-parameters': dict or None,
	        'te-command-line': string or None,
	        'source': base64-encoded string
	       }
	@returns: a dict of info for the given job, or None if not found.

	@throws Exception: when the job was not found, or when the job file was removed.
	"""
    getLogger().info(">> getJobDetails(%s)" % str(jobId))
    res = []
    try:
        res = JobManager.instance().getJobDetails(jobId)
    except Exception as e:
        e = Exception("Unable to complete getJobDetails operation: %s\n%s" %
                      (str(e), Tools.getBacktrace()))
        getLogger().info("<< getJobDetails(...): Fault:\n%s" % str(e))
        raise (e)

    getLogger().info("<< getJobDetails: job details returned")
    return res
Code example #28
File: WebServices.py Project: rolfschr/testerman
def rescheduleJob(jobId, at):
	"""
	Reschedules a job to start at <at>.

	@since: 1.2

	@type  jobId: integer
	@param jobId: the jobId identifying the job that needs rescheduling
	@type  at: float
	@param at: the timestamp of the new scheduled start
	
	@throws Exception: in case of an internal error.

	@rtype: bool
	@returns: True if the rescheduling was OK, False otherwise (job already started)
	"""
	getLogger().info(">> rescheduleJob(%s)" % str(jobId))
	res = False
	try:
		res = JobManager.instance().rescheduleJob(jobId, at)
	except Exception, e:
		e =  Exception("Unable to complete rescheduleJob operation: %s\n%s" % (str(e), Tools.getBacktrace()))
		getLogger().info("<< rescheduleJob(...): Fault:\n%s" % str(e))
		raise(e)
Code example #29
File: WebServices.py Project: rolfschr/testerman
def purgeJobQueue(older_than):
	"""
	Purges jobs in the queue that:
	- are completed (any status)
	- and whose completion time is strictly older than the provided older_than timestamp (UTC)

	@since: 1.5

	@type  older_than: float (timestamp)
	@param older_than: the epoch timestamp of the older job to keep

	@throws Exception in case of an error

	@rtype: int
	@returns: the number of purged jobs
	"""
	getLogger().info(">> purgeJobQueue(%s)" % older_than)
	res = 0
	try:
		res = JobManager.instance().purgeJobs(older_than)
	except Exception, e:
		e =  Exception("Unable to complete purgeJobs operation: %s\n%s" % (str(e), Tools.getBacktrace()))
		getLogger().info("<< purgeJobQueue(...): Fault:\n%s" % str(e))
		raise(e)
Code example #30
File: WebServices.py Project: rolfschr/testerman
def sendSignal(jobId, signal):
	"""
	Sends a signal to the job id'd by jobId.
	
	@since: 1.0

	@type  jobId: integer
	@param jobId: the job Id
	@type  signal: string
	@param signal: the signal to send to the job 
	
	@throws Exception: in case of an internal error.

	@rtype: bool
	@returns: True if successfully sent, or False if the job was not found.
	"""
	getLogger().info(">> sendSignal(%d, %s)" % (jobId, signal))
	ret = False
	try:
		ret = JobManager.instance().sendSignal(jobId, signal)
	except Exception, e:
		e =  Exception("Unable to perform operation: %s\n%s" % (str(e), Tools.getBacktrace()))
		getLogger().info("<< sendSignal(...): Fault:\n%s" % str(e))
		raise(e)
Code example #31
    def run(self, kofile, tsfile, wtfile, datafiles, name, goldnet_file, normalize=False):
        os.chdir(os.environ["gene_path"])
        knockout_storage = ReadData(kofile, "knockout")
        print "Reading in knockout data"
        wildtype_storage = ReadData(wtfile, "steadystate")

        if datafiles == []:
            other_storage = None
        else:
            other_storage = ReadData(datafiles[0], "steadystate")
            for file in datafiles[1:]:
                other_storage.combine(ReadData(file, "steadystate"))

        timeseries_storage = None
        if tsfile != None:
            timeseries_storage = ReadData(tsfile, "timeseries")
            #for ts in timeseries_storage:
                #ts.normalize()

        #if normalize:
            #knockout_storage.normalize()
            #wildtype_storage.normalize()
            #other_storage.normalize()


        settings = {}
        settings = ReadConfig(settings)
        # TODO: CHANGE ME
        settings["global"]["working_dir"] = os.getcwd() + '/'

        # Setup job manager
        print "Starting new job manager"
        jobman = JobManager(settings)

        # Make inferelator jobs
        inferelatorjob = inferelator()
        inferelatorjob.setup(knockout_storage, wildtype_storage, settings, timeseries_storage, other_storage, name)

        print "Queuing job..."
        jobman.queueJob(inferelatorjob)

        print jobman.queue
        print "Running queue..."
        jobman.runQueue()
        jobman.waitToClear()

        print "Queue finished"
        job = jobman.finished[0]
        #print job.alg.gene_list
        #print job.alg.read_output(settings)
        jobnet = job.alg.network
        #print "PREDICTED NETWORK:"
        #print job.alg.network.network
        print jobnet.original_network

        if goldnet_file != None:
            goldnet = Network()
            goldnet.read_goldstd(goldnet_file)
            #print "GOLD NETWORK:"
            #print goldnet.network
            #print jobnet.analyzeMotifs(goldnet).ToString()
            print jobnet.calculateAccuracy(goldnet)
            import AnalyzeResults
            tprs, fprs, rocs = AnalyzeResults.GenerateMultiROC(jobman.finished, goldnet)
            ps, rs, precs = AnalyzeResults.GenerateMultiPR(jobman.finished, goldnet)
            print "Area Under ROC"
            print rocs

            print "Area Under PR"
            print precs

        return jobnet.original_network
Code example #32
# Embedded file name: direct.showbase.JobManagerGlobal
__all__ = ['jobMgr']
import JobManager
jobMgr = JobManager.JobManager()
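
This decompiled module appears to be the shared-singleton idiom from Panda3D's direct.showbase: the manager is constructed once at import time, so every importer sees the same instance:

from direct.showbase.JobManagerGlobal import jobMgr  # one shared JobManager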
Code example #33
kd_file = "algorithms/genenetweaver/InSilicoSize10-Ecoli1_knockdowns.tsv"
wt_file = "algorithms/genenetweaver/InSilicoSize10-Ecoli1_wildtype.tsv"
ts_file = "algorithms/genenetweaver/InSilicoSize10-Ecoli1_dream4_timeseries.tsv"
goldnet.read_goldstd("algorithms/genenetweaver/InSilicoSize10-Ecoli1_goldstandard.tsv")


# Read data into program
# Where the format is "FILENAME" "DATATYPE"
knockout_storage = ReadData(ko_file, "knockout")
knockdown_storage = ReadData(kd_file, "knockdown")
timeseries_storage = ReadData(ts_file, "timeseries")
wildtype_storage = ReadData(wt_file, "wildtype")


# Setup job manager
jobman = JobManager(settings)

# Make MCZ job
mczjob = MCZ()
mczjob.setup(knockout_storage, wildtype_storage, settings, timeseries_storage, knockdown_storage, "MCZ")
jobman.queueJob(mczjob)

print jobman.queue
jobman.runQueue()
jobman.waitToClear()


tprs, fprs, rocs = GenerateMultiROC(
    jobman.finished, goldnet, False, settings["global"]["output_dir"] + "/OverallROC.pdf"
)
ps, rs, precs = GenerateMultiPR(jobman.finished, goldnet, False, settings["global"]["output_dir"] + "/OverallPR.pdf")
Code example #34
def get_network_results(name, settings, cache):
  print "STARTING", name

  if name in cache.keys():
    print "CACHE HIT"
    return cache[name]

  ko_file, kd_file, ts_file, wt_file, mf_file, goldnet = get_example_data_files(name, settings)

  # Create date string to append to output_dir
  t = datetime.now().strftime("%Y-%m-%d_%H.%M.%S")
  settings["global"]["output_dir"] = settings["global"]["output_dir_save"] + "/" + \
      settings["global"]["experiment_name"] + "-" + t + "-" + name + "/"
  os.mkdir(settings["global"]["output_dir"])

  # Get a list of the multifactorial files

  # Read data into program
  # Where the format is "FILENAME" "DATATYPE"
  mf_storage = ReadData(mf_file[0], "multifactorial")
  knockout_storage = ReadData(ko_file[0], "knockout")
  knockdown_storage = ReadData(kd_file[0], "knockdown")
  wildtype_storage = ReadData(wt_file[0], "wildtype")
  timeseries_storage = ReadData(ts_file[0], "timeseries")
  gene_list = knockout_storage.gene_list

  # Setup job manager
  jobman = JobManager(settings)

  # MCZ
  mczjob = MCZ()
  mczjob.setup(knockout_storage, wildtype_storage, settings, timeseries_storage, knockdown_storage, "MCZ")
  jobman.queueJob(mczjob)

  # CLR
  clrjob = CLR()
  clrjob.setup(knockout_storage, settings, "CLR", "plos", 6)
  jobman.queueJob(clrjob)

  # GENIE3
  mf_storage.combine(knockout_storage)
  mf_storage.combine(wildtype_storage)
  mf_storage.combine(knockdown_storage)
  genie3job = GENIE3()
  genie3job.setup(mf_storage, settings, "GENIE3")
  jobman.queueJob(genie3job)

  ## TLCLR
  tlclrjob = TLCLR()
  tlclrjob.setup(knockout_storage, wildtype_storage, settings, timeseries_storage, knockdown_storage, "TLCLR")
  jobman.queueJob(tlclrjob)

  #if sys.argv[1] != "dream4100":
      #cojob = ConvexOptimization()
      #cojob.setup(knockout_storage, settings, "ConvOpt_T-"+ str(0.01),None, None, 0.01)
      #jobman.queueJob(cojob)

  ### DFG4GRN
  dfg = DFG4GRN()
  settings["dfg4grn"]["eta_z"] = 0.01
  settings["dfg4grn"]["lambda_w"] = 0.001
  settings["dfg4grn"]["tau"] = 3
  dfg.setup(timeseries_storage, TFList(timeseries_storage[0].gene_list), settings, "DFG", 20)
  jobman.queueJob(dfg)

  ### Inferelator

  ### NIR
  nirjob = NIR()
  nirjob.setup(knockout_storage, settings, "NIR", 5, 5)
  jobman.queueJob(nirjob)

  #### TDARACNE
  settings = ReadConfig(settings, "./config/default_values/tdaracne.cfg")
  bjob = tdaracne()
  settings["tdaracne"]["num_bins"] = 4
  bjob.setup(timeseries_storage, settings, "TDARACNE")
  jobman.queueJob(bjob)


  print jobman.queue
  jobman.runQueue()
  jobman.waitToClear(name)
  SaveResults(jobman.finished, goldnet, settings, name)

  cache[name] = jobman.finished[:]

  return cache[name]
Code example #35
def simpleTest(writeFile, inputFile, m, k):
    line = ""
    bestM = 1
    bestS = 100000000000
    bestR = 0
    for m in range(m, m + 1):
        startTime = time.time()
        jobs = JobManager.JobManager(k, inputFile, m)
        machines = MachineBoss.MachineBoss(m)
        SortedGreedyScheduler.SortedGreedyScheduler(machines, jobs)

        makeSpan = machines.maxMachine().makeSpan
        OPT = max(jobs.MAXJOB, jobs.sumJobTime / float(m))
        if makeSpan < bestS:
            bestS = makeSpan
            bestM = m
            bestR = OPT
        exTime = time.time() - startTime

    line = line + str(m) + "," + str(k) + "," + str(bestS / OPT) + ","
    print "SG"

    bestM = 1
    bestS = 100000000000
    bestR = 1
    for i in range(5):
        for m in range(m, m + 1):
            startTime = time.time()
            jobs = JobManager.JobManager(k, inputFile, m)
            machines = MachineBoss.MachineBoss(m)
            RandomSearch.RandomSearch(machines, jobs)

            makeSpan = machines.maxMachine().makeSpan
            OPT = max(jobs.MAXJOB, jobs.sumJobTime / float(m))
            if makeSpan < bestS:
                bestS = makeSpan
                bestM = m
                bestR = OPT
            exTime = time.time() - startTime
            #print "Max Machine Run time: "+ str(makeSpan) + " OPT for " + str(m) + " machines is " + str(ratio)
            #print "ratio: " + str(makeSpan/ratio)

    line = line + str(bestS / OPT) + ","
    print "RS"

    bestM = 1
    bestS = 100000000000
    bestR = 1
    for i in range(5):
        for m in range(m, m + 1):
            startTime = time.time()
            jobs = JobManager.JobManager(k, inputFile, m)
            machines = MachineBoss.MachineBoss(m)
            RandomSearchStatistics.RandomSearchStatistics(machines, jobs)

            makeSpan = machines.maxMachine().makeSpan
            OPT = max(jobs.MAXJOB, jobs.sumJobTime / float(m))
            if makeSpan < bestS:
                bestS = makeSpan
                bestM = m
                bestR = OPT
            exTime = time.time() - startTime
            #print "Max Machine Run time: "+ str(makeSpan) + " OPT for " + str(m) + " machines is " + str(ratio)
            #print "ratio: " + str(makeSpan/ratio)

    line = line + str(bestS / OPT) + "\n"
    writeFile.write(line)
    print "RSH"
Code example #36
#dex_storage.combine(cnlo_storage)
#dex_storage.combine(no3_storage)

#dex_storage.normalize()
no3_storage.normalize()
cnlo_storage.normalize()
cnlo_no3_storage.normalize()
#all_storage.normalize()

ts_storage = [kno3_1, kno3_2, kno3_3, kno3_4]
#for s in ts_storage:
    #s.normalize()

# Setup job manager
jobman = JobManager(settings)

# Make BANJO jobs
#mczjob = MCZ()
#mczjob.setup(knockout_storage, wildtype_storage, settings, timeseries_storage, knockdown_storage, "MCZ")
#jobman.queueJob(mczjob)

clr_cnlojob = CLR()
clr_cnlojob.setup(cnlo_storage, settings, "clr_cnlo")
jobman.queueJob(clr_cnlojob)

genie3_cnlojob = GENIE3()
genie3_cnlojob.setup(cnlo_storage, settings, "genie3_cnlo")
jobman.queueJob(genie3_cnlojob)

clr_no3job = CLR()
Code example #37
	if cm.get("ts.daemonize"):
		if pidfile:
			getLogger().info("Daemonizing, using pid file %s..." % pidfile)
		else:
			getLogger().info("Daemonizing...")
		Tools.daemonize(pidFilename = pidfile, displayPid = True)


	# Main start
	cm.set_transient("ts.pid", os.getpid())
	try:
		serverThread = XmlRpcServerThread() # Ws server
		FileSystemManager.initialize()
		EventManager.initialize() # Xc server, Ih server [TSE:CH], Il server [TSE:TL]
		ProbeManager.initialize() # Ia client
		JobManager.initialize() # Job scheduler
		serverThread.start()
		getLogger().info("Started.")
		while 1:
			time.sleep(1)
	except KeyboardInterrupt:
		getLogger().info("Shutting down Testerman Server...")
	except Exception, e:
		sys.stderr.write("Unable to start server: %s\n" % str(e))
		getLogger().critical("Unable to start server: " + str(e))

	serverThread.stop()
	JobManager.finalize()
	ProbeManager.finalize()
	EventManager.finalize()
	FileSystemManager.finalize()
Code example #38
File: unitLoop.py Project: AINukeHere/Comggagi_acme
def main():
    # loop over newly created units and update the tile DB when a building goes up
    for ptr,epd in LoopNewUnit():
        unitTypeEPD = epd + 0x64 // 4
        unitType = f_dwread_epd(unitTypeEPD)
        if EUDIf()(unitType == EncodeUnit("Terran SCV")):
            # secondaryOrderPosition // unused -> stores the assigned Job index
            assignedJobIndex = epd + 0xE8 // 4
            DoActions(SetMemoryEPD(assignedJobIndex, SetTo, -1)) # initial value -1 (unassigned)
            f_simpleprint('0xE8 = -1')
        EUDEndIf()

        statusFlags = epd + 0xDC // 4
        # for ground buildings
        if EUDIf()(MemoryXEPD(statusFlags, AtLeast, 1, 2)):
            # unused_0x8C -> death flag: 0 = alive, 1 = dead
            deathFlag = epd + 0x8C // 4
            DoActions(SetMemoryXEPD(deathFlag, SetTo, 0, 0xFFFF)) # initial value 0 (alive)

            # mineral fields match this check too, so skip them explicitly
            EUDContinueIf(EUDSCOr()
            (unitType == EncodeUnit('Mineral Field (Type 1)'))
            (unitType == EncodeUnit('Mineral Field (Type 2)'))
            (unitType == EncodeUnit('Mineral Field (Type 3)'))
            (unitType == EncodeUnit('Vespene Geyser'))
            ())

            unitPosX_EPD = epd + 0x28 // 4
            unitPosY_EPD = epd + 0x2A // 4
            unitPosX = f_wread_epd(unitPosX_EPD, 0)
            unitPosY = f_wread_epd(unitPosY_EPD, 2)
            buildSizeX = BuildingInfo.GetBuildSizeX(unitType)
            buildSizeY = BuildingInfo.GetBuildSizeY(unitType)
            buildingXmin = (unitPosX // 32) - buildSizeX // 2
            buildingYmin = (unitPosY // 32) - buildSizeY // 2
            TileManager.OnNewBuilding(buildingXmin,buildingYmin,buildSizeX,buildSizeY)
        EUDEndIf()

    # loop over every unit
    for ptr, epd in EUDLoopUnit2():
        orderID = epd + 0x4D // 4
        statusFlags = epd + 0xDC // 4
        # unused_0x8C
        deathFlag = epd + 0x8C // 4
        orderIDValue = f_bread_epd(orderID, 0x4D % 4)
        # when the unit has been destroyed
        if EUDIf()(EUDSCAnd()
        (orderIDValue == 0)
        (MemoryXEPD(statusFlags, AtLeast, 1, 2))
        ()):
            if EUDIfNot()(MemoryXEPD(deathFlag, Exactly, 1, 0xFFFF)):
                DoActions(SetMemoryXEPD(deathFlag, SetTo, 1, 0xFFFF))
                f_simpleprint('Destroy Ground Building')
                unitPosX_EPD = epd + 0x28 // 4
                unitPosY_EPD = epd + 0x2A // 4
                unitPosX = f_wread_epd(unitPosX_EPD, 0)
                unitPosY = f_wread_epd(unitPosY_EPD, 2)
                unitTypeEPD = epd + 0x64 // 4
                unitType = f_dwread_epd(unitTypeEPD)
                buildSizeX = BuildingInfo.GetBuildSizeX(unitType)
                buildSizeY = BuildingInfo.GetBuildSizeY(unitType)
                buildingXmin = (unitPosX // 32) - buildSizeX // 2
                buildingYmin = (unitPosY // 32) - buildSizeY // 2
                TileManager.OnDestroyBuilding(buildingXmin,buildingYmin,buildSizeX,buildSizeY)
            EUDEndIf()
        EUDEndIf()
        JobManager.OnUnitLooping(epd)
    JobManager.OnUnitLoopEnd()
Code example #39
        if iteration == 1:
            extractor.setTrainingStructs()
        elif iteration > 1:
            extractor.setStructList(newStructs)

        toCalculate = extractor.getStructList()
        extractor.extract()
    
        # Convert the extracted pseudo-POSCARs to VASP POSCAR files, make directories for them
        # and put the POSCARs in their corresponding directories.
        subprocess.call(['echo','\nConverting outputs to VASP inputs. . .\n'])
        toPoscar = Structs2Poscar.Structs2Poscar(atomList, toCalculate)
        toPoscar.convertOutputsToPoscar()
     
        # Start VASP jobs and wait until they all complete or time out.
        manager = JobManager.JobManager(atomList)
        manager.runLowJobs(toCalculate)
        manager.runNormalJobs(toCalculate)
        manager.runDOSJobs(toCalculate)
    
        # Create structures.in and structures.holdout files for each atom.
        uncleFileMaker = MakeUncleFiles.MakeUncleFiles(atomList)
        uncleFileMaker.makeUncleFiles()
        
        # Get all the structs that have been through VASP calculations for each atom. These
        # should be sorted by formation energy during the work done by makeUncleFiles()
        [vaspStructs, failedStructs] = uncleFileMaker.getStructureList()
        structuresInLengths = uncleFileMaker.getStructuresInLengths() 
        
        # Perform a fit to the VASP data in structures.in for each atom.
        fitter = Fitter.Fitter(atomList, fitStructs, fitSubsets, structuresInLengths, uncleOutput)
Code example #40
	
	# Mesh fineness definition
Mesh_VOLUTE_max_area		= 0.05
Mesh_VOLUTE_min_area		= 0.01
Mesh_ROTOR_max_area		= 0.03
Mesh_ROTOR_min_area		= 0.00291902
Mesh_INLET_max_area		= 0.015
Mesh_INLET_min_area		= 0.001


"""

name = "MyReport"  # Remember to delete all previous reports with the same name
Pres = [100000, 120000, 140000]
Blade_Num = [12, 13, 14]
for p in Pres:
    for Blade in Blade_Num:

        old_Configuration_File = CurrentPath + "/" + "Original2DExternalDrag.cfg"
        newConfigFile = "newCfgBlade%iPres%i.cfg" % (Blade, p)

        JobManager.Compute_Mesh(CurrentPath, Geometry_Template, "Blade_Number",
                                Blade)
        JobManager.ModifyCfg(CurrentPath, old_Configuration_File,
                             newConfigFile, "MARKER_OUTLET",
                             "= ( BE_outlet, %i )" % (p))
        JobManager.Compute_BB(newConfigFile)
        JobManager.Report(name, CurrentPath, "CDrag", "CLift")
        JobManager.CopyMesh(CurrentPath, "newMesh%iPres%i" % (Blade, p))
        #JobManager.CopyCfgFile(CurrentPath,Configuration_File,"newCfgBlade%iPres%i" % Blade, p)
Code example #41
    ko_file = settings["global"]["dream4100_network_knockout_file"].split()
    kd_file = settings["global"]["dream4100_network_knockdown_file"].split()
    ts_file = settings["global"]["dream4100_network_timeseries_file"].split()
    wt_file = settings["global"]["dream4100_network_wildtype_file"].split()

# Read data into program
# Where the format is "FILENAME" "DATATYPE"
knockout_storage = ReadData(ko_file[0], "knockout")
knockdown_storage = ReadData(kd_file[0], "knockdown")
timeseries_storage = ReadData(ts_file[0], "timeseries")
wildtype_storage = ReadData(wt_file[0], "wildtype")



# Setup job manager
jobman = JobManager(settings)

# Make NIR jobs
min_restk = max(len(knockout_storage.gene_list) / 5, 3)
max_restk = len(knockout_storage.gene_list) / 2 + 1
rest_list = list(set([3,5,20,21] + [i for i in range(min_restk, max_restk)]))
rest_list = [3,5,10,15,12,20,21]
for i in rest_list:
    nirjob = NIR()
    nirjob.setup(knockout_storage, settings, "NIR_K="+str(i), 5, i)
    jobman.queueJob(nirjob)

print jobman.queue
jobman.runQueue()
jobman.waitToClear()
Code example #42
File: WebServices.py Project: delian/testerman
def scheduleCampaign(source, campaignId, username, session, at, path=None):
    """
	Schedules a campaign to start at <at>.
	
	@since: 1.2

	@type  source: string
	@param source: the campaign contents, as a utf-8 string
	@type  campaignId: string
	@param campaignId: a friendly identifier/job label
	@type  username: string
	@param username: the username of the user who scheduled this ATS
	@type  session: dict[utf-8 strings] of utf8 strings
	@param session: input session variables (may be empty)
	@type  at: float, or None
	@param at: the timestamp at which the ats should be started.
	           If set to None or lower than current (server) time, immediate start.
	@type  path: string, or None
	@param path: the complete docroot-path to the file associated to the source,
	             if any. For source files not located on the server, set to None.
	             For the other ones, enables to know where to search dependencies
	             from.
	
	@throws Exception: in case of an internal error

	@rtype: dict { 'job-uri': string, 'job-id': integer, 'message': string }
	@returns: a dict containing: 
	          job-uri: the newly created job uri, only valid if status == 0
	          job-id: the newly created job id, only valid if status == 0
	          message: a human readable string indicating what was done.
	"""
    getLogger().info(">> scheduleCampaign(..., session = %s)" % str(session))

    try:
        # FIXME: ats and the dict of string seems to be received as unicode,
        # whereas they were sent by the client as UTF-8.
        # I should check on the wire and/or an XML-RPC feature somewhere (default encoding, etc).

        # Translate the session into a dict[unicode] of unicode
        s = {}
        if session:
            for (k, v) in session.items():
                s[k] = v

        source = source.encode('utf-8')

        job = JobManager.CampaignJob(campaignId, source, path)
        job.setUsername(username)
        job.setScheduledStartTime(at)
        job.setScheduledSession(s)
        jobId = JobManager.instance().submitJob(job)
        message = ""
        if at is None or at <= time.time():
            message = "immediate start"
        else:
            message = "will start on %s" % time.strftime(
                "%Y%m%d, at %H:%M:%S",
                time.localtime(job.getScheduledStartTime()))
        res = {
            'job-id': jobId,
            'job-uri': job.getUri(),
            'message': "Campaign scheduled, %s. Its job ID is %d" % (message, jobId)
        }
    except Exception as e:
        e = Exception("Scheduling error: %s" % (str(e)))
        getLogger().info("<< scheduleCampaign(...): Fault:\n%s" %
                         Tools.getBacktrace())
        raise (e)

    getLogger().info("<< scheduleCampaign(...): %s" % str(res))
    return res
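
A call sketch for the function above; the source variable, label, username, and session values are illustrative:

import time

result = scheduleCampaign(campaign_source,       # campaign contents (assumed variable)
                          "nightly-regression",  # friendly job label
                          "alice",               # scheduling user
                          {"ENV": "staging"},    # input session variables
                          time.time() + 3600)    # start in one hour
print result['message']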
Code example #43
                #-- end if
            elif msg == "err":
                # set error state
                bGoOn = False
            #-- end if
        #-- end while
        self.done = True
        self.fLog.close()

    #-- end def


#-- end class

if __name__ == '__main__':
    jm = JobManager.JobManager('.', '*.txt')

    #-- for all hosts: spawn worker
    #-- loop with server socket: commsock=s.accept()
    #-- comms.append(commsock, jm, host, host_dir, "log_"+host+".lof")

    print("creating comms")
    comms = []
    for i in range(10):
        comms.append(
            Communicator(None, jm, "nudibranch", "4000",
                         "/home/jody/progs/QHG3/work", "arfl_%02d.lst" % (i)))
    #-- end for
    print("creating  threads")
    my_threads = []
    for x in comms:
Code example #44
    settings["global"]["experiment_name"] + "-" + t + "/"
os.mkdir(settings["global"]["output_dir"])

# Read in the gold standard network
goldnet = Network()
#goldnet.read_goldstd(settings["global"]["large_network_goldnet_file"])
ko_file, kd_file, ts_file, wt_file, mf_file, goldnet = get_example_data_files(sys.argv[1], settings)


# Read data into program
# Where the format is "FILENAME" "DATATYPE"
knockout_storage = ReadData(ko_file[0], "knockout")
knockdown_storage = ReadData(kd_file[0], "knockdown")
timeseries_storage = ReadData(ts_file[0], "timeseries")
wildtype_storage = ReadData(wt_file[0], "wildtype")



# Setup job manager
jobman = JobManager(settings)

clusterjob = Cmonkey()
clusterjob.setup(knockout_storage, settings)

jobman.queueJob(clusterjob)
jobman.runQueue()
jobman.waitToClear()

Code example #45
    ko_file = settings["global"]["dream4100_network_knockout_file"].split()
    kd_file = settings["global"]["dream4100_network_knockdown_file"].split()
    ts_file = settings["global"]["dream4100_network_timeseries_file"].split()
    wt_file = settings["global"]["dream4100_network_wildtype_file"].split()
    mf_file = settings["global"]["dream4100_network_multifactorial_file"].split()

# Read data into program
# Where the format is "FILENAME" "DATATYPE"
knockout_storage = ReadData(ko_file[0], "knockout")
knockdown_storage = ReadData(kd_file[0], "knockdown")
timeseries_storage = ReadData(ts_file[0], "timeseries")
wildtype_storage = ReadData(wt_file[0], "wildtype")
mf_storage = ReadData(mf_file[0], "multifactorial")

# Setup job manager
jobman = JobManager(settings)

# Make BANJO jobs
mczjob = MCZ()
mczjob.setup(knockout_storage, wildtype_storage, settings, timeseries_storage, knockdown_storage, "MCZ_Alone")
jobman.queueJob(mczjob)

clrjob = CLR()
clrjob.setup(knockout_storage, settings, "clr_" + t + "_Bins-" + str(6), "plos", 6)
jobman.queueJob(clrjob)

#cojob = ConvexOptimization()
#cojob.setup(knockout_storage, settings, "ConvOpt_T-Plos",None, None, 0.04)
#jobman.queueJob(cojob)

mf_storage.combine(knockout_storage)
Code example #46
        continue
    data[name] = ReadData(exp_data_directory + '/' + name + '/' + timeseries_filename, "timeseries")
    for ts in data[name]:
        ts.normalize()
    knockouts[name] = ReadData(exp_data_directory + '/' + name + '/' + knockout_filename, "knockout")
    knockouts[name].normalize()
    knockdowns[name] = ReadData(exp_data_directory + '/' + name + '/' + knockdown_filename, "knockdown")
    knockdowns[name].normalize()
    wildtypes[name] = ReadData(exp_data_directory + '/' + name + '/' + wildtype_filename, "wildtype")
    wildtypes[name].normalize()
    multifactorials[name] = ReadData(exp_data_directory + '/' + name + '/' + multifactorial_filename, "multifactorial")
    multifactorials[name].normalize()
    goldnets[name] = exp_data_directory + '/' + name + '/' + goldstandard_filename


jobman = JobManager(settings)

# Get TFS from the goldstandard
tfs = {}
for name in data.keys():
    t = []
    goldnet = Network()
    goldnet.read_goldstd(goldnets[name])
    for gene1 in goldnet.network:
        for gene2 in goldnet.network[gene1]:
            if goldnet.network[gene1][gene2] > 0:
                t.append(gene1)
    tfs[name] = list(set(t))

goldnet = Network()
goldnet.read_goldstd(goldnets[data.keys()[0]])
Code example #47
# Set up output directory
t = datetime.now().strftime("%Y-%m-%d_%H.%M.%S")
settings["global"]["output_dir"] = (
    settings["global"]["output_dir"] + "/" + settings["global"]["experiment_name"] + "-" + t + "/"
)
os.mkdir(settings["global"]["output_dir"])


# Read in configs for this algorithm
from dfg4grn import *

settings = ReadConfig(settings, "./config/default_values/dfg4grn.cfg")
settings = ReadConfig(settings, settings["dfg4grn"]["config"])
grid = Generate_Grid("dfg4grn", None, settings, ["eta_z", "lambda_w", "tau"], 5).test_list

jobman = JobManager(settings)

dfg = DFG4GRN()
settings["dfg4grn"]["eta_z"] = 0.1
settings["dfg4grn"]["lambda_w"] = 0.01
settings["dfg4grn"]["tau"] = 3.5
dfg.setup(
    timeseries_storage,
    TFList(timeseries_storage[0].gene_list),
    settings,
    "EtaZ-{0}_LamdaW-{1}_Tau-{2}".format(0.1, 0.01, 3.5),
    20,
)
jobman.queueJob(dfg)

dfg = DFG4GRN()
Code example #48
File: WebServices.py Project: rolfschr/testerman
def scheduleCampaign(source, campaignId, username, session, at, path = None):
	"""
	Schedules a campaign to start at <at>.
	
	@since: 1.2

	@type  source: string
	@param source: the campaign contents, as a utf-8 string
	@type  campaignId: string
	@param campaignId: a friendly identifier/job label
	@type  username: string
	@param username: the username of the user who scheduled this ATS
	@type  session: dict[utf-8 strings] of utf8 strings
	@param session: input session variables (may be empty)
	@type  at: float, or None
	@param at: the timestamp at which the ats should be started.
	           If set to None or lower than current (server) time, immediate start.
	@type  path: string, or None
	@param path: the complete docroot-path to the file associated to the source,
	             if any. For source files not located on the server, set to None.
	             For the other ones, enables to know where to search dependencies
	             from.
	
	@throws Exception: in case of an internal error

	@rtype: dict { 'job-uri': string, 'job-id': integer, 'message': string }
	@returns: a dict containing: 
	          job-uri: the newly created job uri, only valid if status == 0
	          job-id: the newly created job id, only valid if status == 0
	          message: a human readable string indicating what was done.
	"""
	getLogger().info(">> scheduleCampaign(..., session = %s)" % str(session))

	try:
		# FIXME: ats and the dict of string seems to be received as unicode,
		# whereas they were sent by the client as UTF-8.
		# I should check on the wire and/or an XML-RPC feature somewhere (default encoding, etc).

		# Translate the session into a dict[unicode] of unicode
		s = {}
		if session:
			for (k, v) in session.items():
				s[k] = v
		
		source = source.encode('utf-8')
		
		job = JobManager.CampaignJob(campaignId, source, path)
		job.setUsername(username)
		job.setScheduledStartTime(at)
		job.setScheduledSession(s)
		jobId = JobManager.instance().submitJob(job)
		message = ""
		if at is None or at <= time.time():
			message = "immediate start"
		else:
			message = "will start on %s" % time.strftime("%Y%m%d, at %H:%M:%S", time.localtime(job.getScheduledStartTime()))
		res = { 'job-id': jobId, 'job-uri': job.getUri(), 'message': "Campaign scheduled, %s. Its job ID is %d" % (message, jobId) }
	except Exception, e:
		e = Exception("Scheduling error: %s" % (str(e)))
		getLogger().info("<< scheduleCampaign(...): Fault:\n%s" % Tools.getBacktrace())
		raise(e)
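
For context, a minimal sketch of how a client might call this service. Testerman exposes these functions over XML-RPC (see the XmlRpcServerThread startup in the server example further below), but the host, port, campaign file, and argument values here are all assumptions for illustration:

# Hypothetical client-side call; host/port and file names are assumptions,
# not taken from the source.
import xmlrpclib

server = xmlrpclib.ServerProxy("http://testerman-server:8080")
campaignSource = open("nightly.campaign").read()  # hypothetical campaign file
res = server.scheduleCampaign(campaignSource, "nightly-campaign", "alice",
                              {}, None, "/repository/campaigns/nightly.campaign")
print "job %d scheduled: %s" % (res['job-id'], res['message'])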
Code Example #49
File: CrabMonitor.py  Project: blinkseb/cat
    def run(self):
        interrupted = False
        corrupted_job_regex = re.compile(r"Output files for job (\d+) seems corrupted")
        email = Config.get().get()["email"]

        try:
            while True:
                if not Utils.is_proxy_valid():
                    Utils.delegate_proxy(self.verbose)

                self.status()

                get_id = []
                kill_id = []
                submit_id = []
                resubmit_id = []
                force_resubmit_id = []
                corrupted_id = []
                
                n_waiting = 0
                n_running = 0

                for (id, job) in self.jobs.items():
                    status = JobManager.create(job)

                    if status.gettable():
                        get_id.append(str(id))

                    if status.killable():
                        kill_id.append(str(id))

                    if status.submittable():
                        submit_id.append(str(id))

                    if status.failed():
                        resubmit_id.append(str(id))

                    if status.running():
                        n_running = n_running + 1

                    if status.waiting():
                        n_waiting = n_waiting + 1

                    job._status = status

                if self.verbose:
                    if len(get_id) > 0:
                        print("I'll get jobs " + ",".join(get_id))
                    else:
                        print("No job to get output for")

                    if len(kill_id) > 0:
                        print("I'll kill jobs " + ",".join(kill_id))
                    else:
                        print("No job to kill")

                    if len(resubmit_id) > 0:
                        print("I'll resubmit jobs " + ",".join(resubmit_id))
                    else:
                        print("No job to resubmit")

                    print("")
                    self.dump()

                log = ""
                if not self.dry_run:
                    if len(get_id) > 0:
                        if self.verbose:
                            print("Retrieving jobs...")
                        (output, returncode) = Utils.runCrab("get", ",".join(get_id), self.folder)
                        log += "crab -get output:\n"
                        log += "".join(output)
                        log += "\n"

                        # Detect corrupted jobs
                        lines = output
                        for line in lines:
                            matches = re.search(corrupted_job_regex, line)
                            if matches is not None:
                                corrupted_id.append(str(matches.group(1)))

                    if len(corrupted_id) > 0:
                        if self.verbose:
                            print("Some jobs are corrupted: " + ",".join(corrupted_id))
                        kill_id.extend(corrupted_id)
                        kill_id.sort()
                        force_resubmit_id.extend(corrupted_id)
                        force_resubmit_id.sort()

                    if len(kill_id) > 0:
                        if self.verbose:
                            print("Killing jobs...")
                        (output, returncode) = Utils.runCrab("kill", ",".join(kill_id), self.folder)
                        log += "crab -kill output:\n"
                        log += "".join(output)
                        log += "\n"

                    if len(submit_id) > 0:
                        # Crab only accepts a maximum of 500 jobs per submit call.
                        splitted_submit_ids = chunks(submit_id, 500)
                        for splitted_submit_id in splitted_submit_ids:
                            if self.verbose:
                                print("Submitting jobs...")
                            (output, returncode) = Utils.runCrab("submit", ",".join(splitted_submit_id), self.folder)
                            log += "crab -submit output:\n"
                            log += "".join(output)
                            log += "\n"


                    if len(resubmit_id) > 0:
                        if self.verbose:
                            print("Resubmitting jobs...")
                        (output, returncode) = Utils.runCrab("resubmit", ",".join(resubmit_id), self.folder)
                        log += "crab -resubmit output:\n"
                        log += "".join(output)
                        log += "\n"
                    
                    if len(force_resubmit_id) > 0:
                        if self.verbose:
                            print("Force-resubmitting jobs...")
                        (output, returncode) = Utils.runCrab("forceResubmit", ",".join(force_resubmit_id), self.folder)
                        log += "crab -forceResubmit output:\n"
                        log += "".join(output)
                        log += "\n"

                    print("\nAll actions done")
                    print("")

                Email.sendReport(email, self.folder, get_id, kill_id, resubmit_id, force_resubmit_id, corrupted_id, log, self.jobs)

                if len(resubmit_id) == 0 and n_running == 0 and n_waiting == 0:
                    break
                
                if self.verbose:
                    print("[%s] Going to sleep for 2 hours" % strftime("%H:%M:%S", localtime()))

                # Wait for 2 hours
                self.exit.wait(2 * 60 * 60)
                if self.exit.is_set():
                    interrupted = True
                    break

                print("-----------------------------")
                print("")

            if not interrupted:
                Email.sendComplete(email, self.folder, self.jobs)
        except:
            # Send an email on exception
            Email.sendCrash(email, self.folder)
            raise
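
The chunks() helper used above is not defined in this excerpt; a plausible implementation, assuming it simply splits a list into consecutive fixed-size slices:

# Assumed helper (not shown in the excerpt): split `lst` into
# consecutive slices of at most `size` elements each.
def chunks(lst, size):
    return [lst[i:i + size] for i in range(0, len(lst), size)]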
Code Example #50
    def WriteExperiment(self,
                        Algorithm,
                        Distribution,
                        Sorting,
                        k,
                        m,
                        JobsSizes=range(10, 1000)):
        #Check input data
        if not (Algorithm in self.Algorithms):
            raise ValueError(
                "Please choose one of algorithms from ExperimentWriter class")

        if not (Distribution in self.Distributions):
            raise ValueError(
                "Please choose one of distributions from ExperimentWriter class"
            )

        if not (Sorting in self.Sortings):
            raise ValueError(
                "Please choose one of sortings from ExperimentWriter class")

        Results = []
        #Generate jobs sets
        for j in JobsSizes:
            Jobs = []
            if Distribution == self.Distr_Normal:
                Jobs = DistributionGenerator.getGauss(j, 5)
            elif Distribution == self.Distr_Pareto:
                Jobs = DistributionGenerator.getPareto(j)

            #Sort input set
            if Sorting == self.Sort_Sorted:
                Jobs.sort()
            elif Sorting == self.Sort_ReverseSorted:
                Jobs.sort(reverse=True)

            fileName = "inputData" + str(j) + ".txt"
            tmpFile = open(fileName, 'w')
            for job in Jobs:
                tmpFile.write(str(job) + "\n")

            tmpFile.close()

            #Select scheduler
            #Scheduler = None
            Jobs = JobManager.JobManager(k, fileName, m)
            machines = MachineBoss.MachineBoss(m)

            if Algorithm == self.Alg_SortedGreedy:
                SortedGreedyScheduler.SortedGreedyScheduler(machines, Jobs)

            elif Algorithm == self.Alg_Random:
                RandomScheduler.RandomScheduler(machines, Jobs)

            makeSpan = machines.maxMachine().makeSpan
            #print makeSpan
            LB = max(Jobs.sumJobTime / float(m), max(Jobs.jobs))
            ratio = float(makeSpan) / float(LB)
            bestS, bestM = "", ""

            ResultsRow = [k, m, j, makeSpan, ratio, ""]
            Results.append(ResultsRow)

        writeResultsToCSV("TryMe.csv", Results)
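
The ratio computed above compares the achieved makespan against the standard lower bound LB = max(total job time / m, largest job): no schedule on m machines can finish before the average load per machine, nor before its longest single job. A tiny standalone check of that bound:

# Toy illustration of the makespan lower bound used above.
jobs = [7, 3, 5, 4, 1]
m = 2
LB = max(sum(jobs) / float(m), max(jobs))  # max(10.0, 7) -> 10.0
# The schedule [7, 3] / [5, 4, 1] reaches makespan 10, so its ratio is 1.0.
print LB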
Code Example #51
  return cache[name]

# Gather networks
ko_file, kd_file, ts_file, wt_file, mf_file, goldnet = get_example_data_files(sys.argv[1], settings)

# Read data into program
# Where the format is "FILENAME" "DATATYPE"
mf_storage = ReadData(mf_file[0], "multifactorial")
knockout_storage = ReadData(ko_file[0], "knockout")
knockdown_storage = ReadData(kd_file[0], "knockdown")
wildtype_storage = ReadData(wt_file[0], "wildtype")
timeseries_storage = ReadData(ts_file[0], "timeseries")
gene_list = knockout_storage.gene_list
votejob = MCZ()
votejob.setup(knockout_storage, wildtype_storage, settings, timeseries_storage, knockdown_storage, "SimAnnealing")
jobman = JobManager(settings)
jobman.queueJob(votejob)
votejob = jobman.queue[0]
jobman.runQueue()
jobman.waitToClear("VotingJob")

# Send to voting algorithm
dream410 = ["dream410","dream410_2","dream410_3","dream410_4","dream410_5"]
#dream410 = ["dream410","dream410_2"]
dream4100 = ["dream4100","dream4100_2","dream4100_3","dream4100_4","dream4100_5"]
if sys.argv[1] == "dream410":
  networks = dream410
elif sys.argv[1] == "dream4100":
  networks = dream4100
else:
  networks = [sys.argv[1]]
Code Example #52
def test_JobManager_kill_get_jobs_running_completed_stopped():
    jobManager = JobManager()
    id1 = jobManager.create_job("ping 127.0.0.1")
    id2 = jobManager.create_job("ping 127.0.0.1")
    id3 = jobManager.create_job("ping 127.0.0.1")
    id4 = jobManager.create_job("ping 127.0.0.1")
    time.sleep(2)  # Give thread and proc a chance to start
    killed = jobManager.kill_job(id1)
    assert (killed == True)
    assert (len(jobManager.get_jobs_stopped()) == 1)
    running = jobManager.get_jobs_running()
    while (len(running) > 0):
        for job_id in running:
            job = jobManager.get_job(job_id)
            assert (job.status == JobStatus.RUNNING)
        running = jobManager.get_jobs_running()
        time.sleep(0.5)
    assert (len(jobManager.get_jobs_completed()) == 3)
    assert (len(jobManager.get_jobs_stopped()) == 1)
Code Example #53
File: TestermanServer.py  Project: rishie/testerman
    if cm.get("ts.daemonize"):
        if pidfile:
            getLogger().info("Daemonizing, using pid file %s..." % pidfile)
        else:
            getLogger().info("Daemonizing...")
        Tools.daemonize(pidFilename=pidfile, displayPid=True)

    # Main start
    cm.set_transient("ts.pid", os.getpid())
    try:
        serverThread = XmlRpcServerThread()  # Ws server
        FileSystemManager.initialize()
        EventManager.initialize()  # Xc server, Ih server [TSE:CH], Il server [TSE:TL]
        ProbeManager.initialize()  # Ia client
        JobManager.initialize()  # Job scheduler
        serverThread.start()
        getLogger().info("Started.")
        while 1:
            time.sleep(1)
    except KeyboardInterrupt:
        getLogger().info("Shutting down Testerman Server...")
    except Exception, e:
        sys.stderr.write("Unable to start server: %s\n" % str(e))
        getLogger().critical("Unable to start server: " + str(e))

    serverThread.stop()
    JobManager.finalize()
    ProbeManager.finalize()
    EventManager.finalize()
    FileSystemManager.finalize()
Code Example #54
  #bjob.test_net = test_net
  bjob.train_net = name
  jobman.queueJob(bjob)

  return jobman.queue[:]

# Gather networks

# Send to voting algorithm
dream410 = ["dream410","dream410_2","dream410_3","dream410_4","dream410_5"]
dream4100 = ["dream4100","dream4100_2","dream4100_3","dream4100_4","dream4100_5"]
networks = dream410
results = []

# Setup job manager
jobman = JobManager(settings)
goldnets = {}
for net in networks:
  ko_file, kd_file, ts_file, wt_file, mf_file, training_goldnet = get_example_data_files(net, settings)
  goldnets[net] = training_goldnet


training_jobs = []
#goldnets = []
for name in networks:
  if name != test_net:
    get_network_results(name, settings, jobman)
    #ko_file, kd_file, ts_file, wt_file, mf_file, training_goldnet = get_example_data_files(name, settings)
    #goldnets.append(training_goldnet)

print jobman.queue
Code Example #55
    def on_fetch_button_clicked(self,b):
        # error checking ===========================================================

        if self.uploaded_filename is None:
            self.hginterface.alert('Please upload a shape file first.', type = 'warning')
            return
        #print("get data!")

        # check date values
        if not self.hginterface.filePickerAndDatePicker:
            try:
                dt.strptime(str(self.hginterface.dateFrom.value), "%m/%d/%Y")
                dt.strptime(str(self.hginterface.dateTo.value), "%m/%d/%Y")
            except ValueError as err:
                self.hginterface.alert('Date format needs to be mm/dd/yyyy', 'warning')
                return
            # return

        # check date from/to
        f = dt.strptime(str(self.hginterface.dateFrom.value), "%m/%d/%Y")
        t = dt.strptime(str(self.hginterface.dateTo.value), "%m/%d/%Y")
        if f > t:
            self.hginterface.alert('Please check the time-period from/to', 'warning')
            return

        # check valid data range for each data type
        if str(self.hginterface.dataType.value) == "SMAP":
            smapStart = dt.strptime("03/31/2015", "%m/%d/%Y")
            if f < smapStart:
                self.hginterface.alert('Please check the date for SMAP is starting from 03/31/2015', 'warning')
                return
        elif str(self.hginterface.dataType.value) == "MODIS-ET/PET/LE/PLE":
            data_limit_start = dt.strptime("01/01/2000", "%m/%d/%Y")
            data_limit_end = dt.strptime("12/31/2014", "%m/%d/%Y")
            if f < data_limit_start or t > data_limit_end:
                self.hginterface.alert('Supported date range for MODIS-ET/PET/LE/PLE is 01/01/2000 ~ 12/31/2014', 'warning')
                return
        # TODO: valid for 17 years??
        elif str(self.hginterface.dataType.value) == "MODIS-LAI/FPAR":
            data_limit_start = dt.strptime("01/01/2002", "%m/%d/%Y")
            if f < data_limit_start:
                self.hginterface.alert('Supported date range for MODIS-LAI/FPAR is from 01/01/2002', 'warning')
                return
        elif str(self.hginterface.dataType.value) == "GPM":
            smapStart = dt.strptime("05/01/2014", "%m/%d/%Y")
            if f < smapStart:
                self.hginterface.alert('Please check the date for GPM is starting from 05/01/2014', 'warning')
                return


        # check date 1 year range
        r = relativedelta.relativedelta(f, t)
        if r.years != 0:
            self.hginterface.alert('Date range needs to be maximum of 1 year', 'warning')
            return

        # check data type
        # data type
        if self.hginterface.dataType.value not in self.supportedType:
            self.hginterface.alert('Current version supports the following data type(s):\n  - MODIS, SMAP, GPM', 'warning')
            return

        # job name
        if self.hginterface.txt_newjob_jobname.value == '':
            self.hginterface.alert('Please provide a name for data fetch request', 'warning')
            return

        # error checking done =======================================================================================


        self.hginterface.showLoading(msg='Running data fetch...')

        self.hginterface.clear_joblog()

        self.hginterface.enableFileUpload(False)
        self.hginterface.enableGUIOnProcess(False)


        params = {}
        params['jobname'] = self.hginterface.txt_newjob_jobname.value
        params['data_type']= self.hginterface.dataType.value
        params['data_from'] = self.hginterface.dateFrom.value
        params['data_to'] = self.hginterface.dateTo.value
        params['input_fname'] = self.uploaded_filename
        if params['data_type'] == 'GPM':
            if self.hginterface.dbox_gpm.index == 2: #30m nrt
                params['temporal_res'] = '30m_NRT'
            else:
                params['temporal_res'] = self.hginterface.dbox_gpm.value


        job = JobManager.JobManager(params, self.hginterface.print_joblog)
        ret, msg = job.submitJob()

        # after job submission
        self.hginterface.enableGUIOnProcess(True)
        self.hginterface.enableFileUpload(True)

        if not ret:
            self.hginterface.alert(msg)
            self.hginterface.hideLoading()
            return

    
        self.hginterface.alert('Requested Job Finished.','success')
        self.hginterface.hideLoading()
Code Example #56
File: JobOrganizer.py  Project: l1calo/TrigT1CaloCAF
class JobOrganizer:

	jobInformationList = []
	jobConfigurationList = {}
	#runSetList = []
	configModule = None
	jobManager = None

	def __init__(self, sConfigFile):

		self.sConfigFile = sConfigFile

		self.dbConnection = ""

		self.logger = None
		self.initLogging("JobOrganizer")
		self.logger.info("========================================")
		self.logger.info("JobOrganizer in init")


		# check if initfile is valid
		if os.path.exists(self.sConfigFile):
			self.logger.info("init file " + self.sConfigFile + " was found")
			self.configModule = __import__(os.path.splitext(self.sConfigFile)[0])  # strip(".py") would strip characters, not the extension

			#self.createRunlisteners()
			#self.initRunListeners()
			self.initDbParameters()
			self.readJobsConfiguration()

			self.jobManager = JobManager(self.sConfigFile)

			Tools.createFolder('tmp/jobConfig')

		else:
			self.logger.error("init file "+ self.sConfigFile + " was not found !")
			sys.exit(0)

		self.logger.info("JobOrganizer initialized")
		self.logger.info("========================================")


	def initLogging(self, name):
		#create logger
		self.logger = logging.getLogger(name)
		#self.logger.setLevel(logging.DEBUG)
		self.logger.setLevel(logging.INFO)

		#create file handler and set level to debug
		logdir = os.environ['PWD']+'/logs'
		if not os.path.exists(logdir):
			os.mkdir(logdir)
		fh = logging.FileHandler(logdir+'/'+name+'_'+ time.strftime('%Y%m%d_%H%M')+'.log','w')
		fh.setLevel(logging.DEBUG)

		#create formatter
		formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s: %(message)s", datefmt='%Y-%m-%d %H:%M')

		#add formatter to fh
		fh.setFormatter(formatter)

		#add ch to logger
		self.logger.addHandler(fh)


	def readJobsConfiguration(self):

		self.logger.info("creating job configurations from " + self.sConfigFile + " file")

		self.jobConfigurationList = {}

		for name, jobConfig in self.configModule.jobConfigurations.items():
			jc = JobConfiguration()

			jc.name = name
			jc.configurationTemplate = jobConfig["configurationTemplate"]
			jc.listeners = jobConfig["listeners"]
			jc.validateJob = jobConfig["validateJob"]

			self.jobConfigurationList[name] = jc


	def initDbParameters(self):
		self.logger.info("retrieveing Db parameters from " + self.sConfigFile + " file")
		self.dbConnection = self.configModule.dbconnection


	def getDbConnection(self):
		return sqlite.connect(self.dbConnection)

	def getNewRunsFromDb(self):

		# connect to DB
		connection = self.getDbConnection()

		cursor = connection.cursor()
		# we retrieve only the new runs with rawstatus = 'OK' ie when all datasets are available
		cursor.execute("select run, listener, validated, rawpath from RUNSTATUS where status='NEW'") # and rawstatus='OK'

		#build a list of runs from Db
		runsFromDb = []
		for row in cursor:
			ri = RunInformation()
			ri.runNumber = str(row[0])
			ri.runListenerName = str(row[1])
			if str(row[2])=="True":
				ri.validated = True
			ri.rawPath = str(row[3])
			runsFromDb.append(ri)

		connection.close()
		return runsFromDb


	def loadJobConfigModule(self, jobInformation):

		sConfigTpl = jobInformation.jobConfiguration.configurationTemplate
		#placeHolders = {}
		jobInformation.placeHolders['#RUN_NUMBER#'] = str(jobInformation.runNumber)
		jobInformation.placeHolders['#RUN_NUMBER_PADDED#'] = runNumberToStr(int(jobInformation.runNumber))
		jobInformation.placeHolders['#RUN_NUMBER_PADDED_8#'] = '0'+runNumberToStr(int(jobInformation.runNumber))

		jobInformation.placeHolders['#JOB_CONFIGURATION#'] = str(jobInformation.jobConfiguration.name)

		jobInformation.placeHolders['#DAEMON_JO_TEMPLATE_DIR#'] = str(self.configModule.JobOptionsTemplateDir)
		jobInformation.placeHolders['#DAEMON_SCRIPTS_DIR#'] = str(self.configModule.ScriptsDir)
		jobInformation.placeHolders['#DAEMON_BACKENDS_DIR#'] = str(self.configModule.BackEndsDir)

		jobInformation.placeHolders['#JOBS_WORKING_DIR#'] = str(self.configModule.JobsWorkingDir)


		jobWorkingDir = str(self.configModule.JobsWorkingDir)+'/'+str(jobInformation.runNumber)+'_'+str(jobInformation.jobConfiguration.name)
		jobLogDir = jobWorkingDir + '/log'
		jobRootDir = jobWorkingDir + '/root'
		jobPoolDir = jobWorkingDir + '/pool'
		jobConfigDir = jobWorkingDir + '/config'
		jobPostDir = jobWorkingDir + '/post'

		jobInformation.placeHolders['#JOB_WORKING_DIR#'] = jobWorkingDir
		jobInformation.placeHolders['#JOB_LOG_DIR#'] = jobLogDir
		jobInformation.placeHolders['#JOB_ROOT_DIR#'] = jobRootDir
		jobInformation.placeHolders['#JOB_POOL_DIR#'] = jobPoolDir
		jobInformation.placeHolders['#JOB_CONFIG_DIR#'] = jobConfigDir
		jobInformation.placeHolders['#JOB_POST_DIR#'] = jobPostDir

		jobInformation.placeHolders['#CASTOR_DATA_DIR#'] = self.configModule.CastorDataDir


		#get configtpl

		sConfigModule = str(jobInformation.runNumber)+'_'+str(jobInformation.jobConfiguration.name)+'_jobConfig.py'
		sInputPath = self.configModule.ConfigTemplateDir+'/'+sConfigTpl

		#need to check or create folders
		sTmpModulePath = self.configModule.TmpInstallDir+'/jobConfig'
		sOutputPath = sTmpModulePath+'/'+sConfigModule
		jobInformation.jobConfigModulePath = sOutputPath

		replaceTag(sInputPath, sOutputPath, jobInformation.placeHolders)

		if sTmpModulePath not in sys.path:
			sys.path.append(sTmpModulePath)

		jobInformation.jobConfigModule = __import__(os.path.splitext(sConfigModule)[0])

		return


	def initJobInformationListFromDb(self):

		connection = self.getDbConnection()
		cursor = connection.cursor()

		cursor.execute('select run, jobconfiguration, batchid, status, validation, rawpath, jobstart, jobend from JOBSTATUS where validation="WAITING" OR ((validation="YES" OR validation="NONE") AND status<>"DONE")')
		for row in cursor:

			if row[1] in self.jobConfigurationList.keys():
				jobInfo = JobInformation()

				jobInfo.runNumber = row[0]
				jobInfo.jobConfiguration = self.jobConfigurationList[row[1]]
				jobInfo.batchid = row[2]
				jobInfo.status = row[3]
				jobInfo.validation = row[4]
				jobInfo.rawPath = row[5]
				jobInfo.jobStart = row[6]
				jobInfo.jobEnd = row[7]
				jobInfo.dbConnection = self.dbConnection

				self.loadJobConfigModule(jobInfo)

				self.jobInformationList.append(jobInfo)


#		for jobInfo in self.jobInformationList:
#			print jobInfo

		connection.close()

		#send list of jobs not NEW to jobManager
		jobList = []
		for jobInfo in self.jobInformationList:
			if jobInfo.status!='NEW':
				jobList.append(jobInfo)

		self.jobManager.addJobsFromDb(jobList)
		return

	def newJobsFromDb(self):
		jobList = []

		connection = self.getDbConnection()
		cursor = connection.cursor()

		cursor.execute('select run, jobconfiguration, batchid, status, validation, rawpath, jobstart, jobend from JOBSTATUS where validation="WAITING" OR ((validation="YES" OR validation="NONE") AND status<>"DONE")')
		for row in cursor:

			if row[1] in self.jobConfigurationList.keys():
				jobInfo = JobInformation()

				jobInfo.runNumber = row[0]
				jobInfo.jobConfiguration = self.jobConfigurationList[row[1]]
				jobInfo.batchid = row[2]
				jobInfo.status = row[3]
				jobInfo.validation = row[4]
				jobInfo.rawPath = row[5]
				jobInfo.jobStart = row[6]
				jobInfo.jobEnd = row[7]
				jobInfo.dbConnection = self.dbConnection

				self.loadJobConfigModule(jobInfo)

				jobList.append(jobInfo)

		return jobList

	def updateJobInformationList(self):

		connection = self.getDbConnection()
		cursor = connection.cursor()

		self.logger.info("Updating jobInformation list")

		###================================###
		# get list of new runs from the run Db
		runInfoList = self.getNewRunsFromDb()

		if runInfoList!=[]:
			self.logger.debug("-> New runs found in run DB:")

			# associate a jobConfiguration
			for runInfo in runInfoList:
				self.logger.debug(" * run "+runInfo.runNumber)

				for name, jobConfig in self.jobConfigurationList.items():
					if jobConfig.suits(runInfo):

	#					# if the jobConfig requires the run to be validated and that one is not
	#					# the job is not considered
	#					if jobConfig.validateJob:
	#						print 'tutu'
	#						if not runInfo.validated:
	#							continue

						#should consider the number of files and create as many sub-jobs as needed
						# depending on jobConfiguration
						# jobInfo.subjob=1,2,3,...

						# create a new jobInformation
						jobInfo = JobInformation()
						jobInfo.dbConnection = self.dbConnection
						jobInfo.runNumber = runInfo.runNumber
						jobInfo.rawPath = runInfo.rawPath
						jobInfo.jobConfiguration = jobConfig
						self.loadJobConfigModule(jobInfo)

						jobInfo.status = 'NEW'

						needsValidation = (jobConfig.validateJob or runInfo.validated)
						self.logger.info("validate job: "+str(needsValidation))
						if needsValidation:
							jobInfo.validation = 'WAITING'
						else:
							jobInfo.validation = 'NONE'

						self.logger.info("jobInfo.validation: "+jobInfo.validation)

						#check db to see if no run/jobconfig already there
						cursor.execute('select run, jobconfiguration from JOBSTATUS')
						alreadyInJobDb = False
						for row in cursor:
							if row[0]==str(jobInfo.runNumber) and row[1]==str(jobInfo.jobConfiguration.name):
								alreadyInJobDb = True
								break

						if not alreadyInJobDb:
							self.jobInformationList.append(jobInfo)
							cursor.execute("insert into JOBSTATUS(id, run, jobconfiguration, status, validation) values (null, ?, ?, ?, ?)", (jobInfo.runNumber, jobInfo.jobConfiguration.name, jobInfo.status, jobInfo.validation))
							self.logger.info(jobInfo.runNumber+'_'+jobInfo.jobConfiguration.name+" added to jobInformation list")

						else:
							self.logger.info(jobInfo.runNumber+'_'+jobInfo.jobConfiguration.name+" already exist in the job DB")


				# set run as 'seen' in runstatus DB
				cursor.execute("update RUNSTATUS set status=? where run=?",('OK',str(runInfo.runNumber)))

			connection.commit()

		else:
			self.logger.debug("-> No new runs found in run DB")

		###===============================================================###
		# check jobs with status = NEW that are not in the current job list,
		# i.e. jobs modified by external programs after the daemon started
		# reprocess attribute ?

		self.logger.debug("looking for new jobs in job DB")

		newJobs = self.newJobsFromDb()
		for newJob in newJobs:
			alreadyInJobList = False
			for jobInfo in self.jobInformationList:
				if str(jobInfo.runNumber)==str(newJob.runNumber) and str(jobInfo.jobConfiguration.name)==str(newJob.jobConfiguration.name):
					alreadyInJobList=True
					break
			if not alreadyInJobList:
				self.jobInformationList.append(newJob)
				self.logger.debug("job "+newJob.runNumber+'_'+newJob.jobConfiguration.name+" added to jobInformation list")

			else:
				self.logger.debug("job "+newJob.runNumber+'_'+newJob.jobConfiguration.name+" already in jobInformation list")


		###===============================================================###
		# update validation state from DB for jobs with validation=WAITING
		for jobInfo in self.jobInformationList:
			if jobInfo.validation=="WAITING":
				cursor.execute('select validation from JOBSTATUS where run=? and jobconfiguration=?',(jobInfo.runNumber, jobInfo.jobConfiguration.name))
				jobInfo.validation = cursor.fetchone()[0]


		###=============================###
		# update job status from JobManager
		self.jobManager.updateJobsStatus()


		###===========================================================###
		# remove job DONE or validation=BAD from  self.jobInformationList
		# do not touch the DB
		for jobInfo in reversed(self.jobInformationList):
			if jobInfo.validation=="BAD" or jobInfo.status=="DONE":

				self.logger.debug("removing jobInformation "+jobInfo.runNumber+'_'+jobInfo.jobConfiguration.name+ ", status: "+jobInfo.status+", validation: "+jobInfo.validation)

				#delete the jobInfo associated config file
				cmd = 'rm ' + os.path.splitext(jobInfo.jobConfigModulePath)[0] + '.*'
				commands.getoutput(cmd)

				#remove jobInfo form list
				self.jobInformationList.remove(jobInfo)

		connection.close()

		return


	def submitBatchJobs(self):

		jobInfoList=[]

		#select jobs to be processed (is validated==NONE or YES? is set/dependencies complete ?)
		for jobInfo in self.jobInformationList:
			if (jobInfo.validation=="NONE" or jobInfo.validation=="YES") and jobInfo.status=="NEW":
				jobInfoList.append(jobInfo)

		#print jobInfoList
		self.jobManager.addNewJobs(jobInfoList)

		return


	def processBatchJobs(self):
		self.jobManager.processJobs()
		return
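
Two helpers used by loadJobConfigModule above, replaceTag and runNumberToStr, are not part of this excerpt. A minimal sketch of what they presumably do, assuming replaceTag performs plain text substitution of the '#TAG#' placeholders and runNumberToStr zero-pads the run number (the padding width of 7 is a guess, consistent with the extra '0' prefix used for the 8-wide variant above):

# Assumed helpers (not shown in this excerpt).
def replaceTag(sInputPath, sOutputPath, placeHolders):
	# Copy a template file, substituting every '#TAG#' placeholder.
	text = open(sInputPath).read()
	for tag, value in placeHolders.items():
		text = text.replace(tag, value)
	open(sOutputPath, 'w').write(text)

def runNumberToStr(runNumber):
	# Zero-pad the run number to 7 digits (width is a guess).
	return '%07d' % runNumber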
Code Example #57
    data[name] = ReadData(exp_data_directory + '/' + name + '/' + timeseries_filename, "timeseries")

    timeseries_as_steady_state[name] = ReadData(exp_data_directory + '/' + name + '/' + timeseries_filename, "timeseries")
    #for ts in data[name]:
        #ts.normalize()
    knockouts[name] = ReadData(exp_data_directory + '/' + name + '/' + knockout_filename, "knockout")
    #knockouts[name].normalize()
    knockdowns[name] = ReadData(exp_data_directory + '/' + name + '/' + knockdown_filename, "knockdown")
    #knockdowns[name].normalize()
    wildtypes[name] = ReadData(exp_data_directory + '/' + name + '/' + wildtype_filename, "wildtype")
    #wildtypes[name].normalize()
    multifactorials[name] = ReadData(exp_data_directory + '/' + name + '/' + multifactorial_filename, "multifactorial")
    #multifactorials[name].normalize()
    goldnets[name] = exp_data_directory + '/' + name + '/' + goldstandard_filename

jobman = JobManager(settings)



# Get TFS from the goldstandard
tfs = {}
for name in data.keys():
    t = []
    goldnet = Network()
    goldnet.read_goldstd(goldnets[name])
    for gene1 in goldnet.network:
        for gene2 in goldnet.network[gene1]:
            if goldnet.network[gene1][gene2] > 0:
                t.append(gene1)
    tfs[name] = list(set(t))
Code Example #58
    settings["global"]["experiment_name"] + "-" + t + "/"
os.mkdir(settings["global"]["output_dir"])

# Get a list of the multifactorial files

# Read data into program
# Where the format is "FILENAME" "DATATYPE"
mf_storage = ReadData(mf_file[0], "multifactorial")
knockout_storage = ReadData(ko_file[0], "knockout")
knockdown_storage = ReadData(kd_file[0], "knockdown")
wildtype_storage = ReadData(wt_file[0], "wildtype")
timeseries_storage = ReadData(ts_file[0], "timeseries")
gene_list = knockout_storage.gene_list

# Setup job manager
jobman = JobManager(settings)

# MCZ
mczjob = MCZ()
mczjob.setup(knockout_storage, wildtype_storage, settings, timeseries_storage, knockdown_storage, "MCZ")
jobman.queueJob(mczjob)

# CLR
clrjob = CLR()
clrjob.setup(knockout_storage, settings, "CLR", "plos", 6)
jobman.queueJob(clrjob)

# GENIE3
mf_storage.combine(knockout_storage)
mf_storage.combine(wildtype_storage)
mf_storage.combine(knockdown_storage)