Example #1
	def run_bulk(self, t, special_input=None):
		# run name
		runname="%s, ni=%f, T=%f" % (self.material,  self.ni, t)
		self.write_log("##############################################\n")
		self.write_log("### euorun: %s\n" % runname)
		self.write_log("##############################################\n")
		# run command
		runcmd=self.mpicmd + " -np %i " % self.np
		runcmd+=self.sp.get_runcmd_bulk(self.material, self.ni, t)
		# add additional parameter
		runcmd+=self.iteration_parameter
		# add output
		runoutput=self.output + self.idb.get_temp_output(t)
		runcmd+=" -o %s/" % runoutput
		runexists=self.run_exists(runcmd, runoutput, check_database=self.check_database)
		if not runexists:
			# add special input folder
			if special_input is not None:
				runcmd+=" -i %s" % (special_input)
			# search self.input folder for suitable input folders and add it
			else:
				if self.inputFlag:
					runcmd=database.add_input(runcmd, download_path=self.output+"/download/", path=self.input, source=self.source, input_system_name=self.input_system_name)
			# run job
			j=job.job(runname, self.log, self.email, [runcmd], logappend=True, verbose=self.verbose, mailcmd=self.mailcmd)
			j.run()

		# update database
		if self.updatedbFlag and not runexists:
			self.write_log("* Update bulk database\n")
			updatecmd="bulk_remote.py %s" % runoutput
			#subprocess.call(updatecmd, shell=True)
			j=job.job("update remote bulk database", self.log, self.email, [updatecmd], logappend=True, verbose=False, mailcmd=self.mailcmd)
			j.run()
		self.write_log("\n")
Example #2
 def test_regal_create(self):
     j = job(1, 0)
     self.assertEqual(j.pt, 1)
     self.assertEqual(j.rd, 0)
     j = job()
     self.assertEqual(j.pt, 1)
     self.assertEqual(j.rd, 0)
     s = 'pt: 1, rd: 0'
     self.assertEqual(str(j), s)
Example #3
 def test_heapq(self):
     ref_jobs = [job(1, 0), job(1, 3), job(2, 1), job(3, 1), job(4, 5)]
     target_jobs = []
     heapq.heappush(target_jobs, job(1, 3))
     heapq.heappush(target_jobs, job(3, 1))
     heapq.heappush(target_jobs, job(1, 0))
     heapq.heappush(target_jobs, job(4, 5))
     heapq.heappush(target_jobs, job(2, 1))
     for i in range(len(target_jobs)):
         self.assertEqual(ref_jobs[i], heapq.heappop(target_jobs))
Example #4
    def run_isolated(self, t, special_input=None):
        # run name
        runname = "%s, N=%i, ni=%f, T=%f" % (self.material, self.N, self.ni, t)
        self.write_log("##############################################\n")
        self.write_log("### euorun: %s\n" % runname)
        self.write_log("##############################################\n")
        # run command
        runcmd = self.mpicmd + " -np %i " % self.np
        runcmd += self.sp.get_runcmd_isolated(self.material, self.N, self.ni,
                                              t)
        # add additional parameter
        runcmd += self.iteration_parameter
        # add output
        runoutput = self.output + self.idb.get_temp_output(t)
        runcmd += " -o %s/" % runoutput
        # check if run exists before downloading possible input
        runexists = self.run_exists(runcmd,
                                    runoutput,
                                    check_database=self.check_database)
        # run job
        if not runexists:
            # add special input folder
            if special_input is not None:
                runcmd += " -i %s" % (special_input)
            # search self.input folder for suitable input folders and add it
            else:
                if self.inputFlag:
                    runcmd = database.add_input(
                        runcmd,
                        download_path=self.output + "/download/",
                        path=self.input,
                        source=self.source,
                        input_system_name=self.input_system_name)

            j = job.job(runname,
                        self.log,
                        self.email, [runcmd],
                        logappend=True,
                        verbose=self.verbose,
                        mailcmd=self.mailcmd)
            j.run()

        # update database
        if self.updatedbFlag and not runexists:
            self.write_log("* Update isolated database\n")
            updatecmd = "isolated_remote.py %s" % runoutput
            #subprocess.call(updatecmd, shell=True)
            j = job.job("update remote isolated database",
                        self.log,
                        self.email, [updatecmd],
                        logappend=True,
                        verbose=False,
                        mailcmd=self.mailcmd)
            j.run()
        self.write_log("\n")
Example #5
def gen_jobs(jobs, co_id):
    jb = []
    for entry in jobs:
        splt = entry.split(',')
        jb.append(job(splt[2], co_id, splt[0], splt[1]))
    return jb
Example #6
def request_cpu(task, ComputeMachine):
    """
    local function which is used to create a process for new/pre-empted job
    """
    global jobs_list
    global queue_count
    if task.number_of_cpu_quantum == 0:
        think_time = np.random.exponential(25)
        print("New job %d is initialized at %.2f with %.2f thinking time and %.2f service time"
              % (task.get_job_id(), task.env.now, think_time, task.get_service_time()))
    
        yield task.env.timeout(think_time)
        print("Job %d ready to request for CPU at %.2f." % (task.get_job_id(), task.env.now))
        task.queue_entry_time = task.env.now
        queue_count[task.queue_entry_time] = 1
    with ComputeMachine.cpu.request() as request:
        print("Job %d has requested for CPU at %.2f." % (task.get_job_id(), task.env.now))
        yield request
        print('Job %d enters the queue at %.2f.' % (task.get_job_id(), task.env.now))
        status = yield task.env.process(ComputeMachine.compute(task))
        if status == 1:
            print('Job %d pre-empted at %.2f.' % (task.get_job_id(), task.env.now))
            task.set_service_time(task.get_service_time() - 0.1)
            return task.env.process(request_cpu(task, ComputeMachine))
        else:
            print('Job %d has completed its execution at %.2f.' % (task.get_job_id(),
                                                                   task.env.now))
            task.job_complete_status = True
            queue_count[task.env.now] = -1
            if len(jobs_list) < 1000:
                temp = job(env, job_id = len(jobs_list)+1,
                           service_time = np.random.exponential(0.8))
                task.env.process(request_cpu(temp, ComputeMachine))
                jobs_list[len(jobs_list)+1] = temp
Example #7
 def add_job(self, path, arg):
     j = job()
     j.path = path
     j.args = arg
     j.status = 0
     j.name = "job" + str(len(self.jobs))
     self.jobs.append(j)
Example #8
    def get_all_jobs(self):
        jobs = []
        jenkinsJobs = self._connection().get_jobs()
        for json in jenkinsJobs:
            jobs.append(job(json))

        return jobs
Example #9
    def test_sorting(self):
        ref_jobs = [job(1, 0), job(2, 1), job(3, 1), job(4, 5)]
        jobs = []
        jobs.append(job(3, 1))
        jobs.append(job(1, 0))
        jobs.append(job(4, 5))
        jobs.append(job(2, 1))
#        jobs.sort()
        for r, t in zip(ref_jobs, sorted(jobs, key=attrgetter('rd', 'pt'))):
            self.assertEqual(r, t)
Example #10
def run_submit(cmd, logstring, append, email, mailcmd):
    cmds = [cmd]
    j = job.job(logstring,
                logstring,
                email,
                cmds,
                logappend=append,
                verbose=True,
                mailcmd=mailcmd)
    j.run()
Example #11
def getJobs():
    from job import job
    jobs = []
    for i in range(1, 30):
        response = requests.get(url=url + '&pg=' + str(i), headers=Header)
        soup = BeautifulSoup(response.text, 'lxml')
        time.sleep(3)
        print("----waiting 3s----")
        for k in soup.select('.presentation-item'):
            _job = job('', '', '', '', '', '', '')

            for j in k.select('.fn-left.position'):
                _job.position = j.string

            for j in k.select('.fn-left.source'):
                _job.source = j.string

            for j in k.select('.fn-right.company'):
                _job.company = j.string

            for j in k.select('.city.fn-left'):
                _job.city = j.string

            for j in k.select('.num.fn-left'):
                _job.num = j.string

            for j in k.select('.time.fn-left'):
                _job.dateTime = j.string

            for j in k.select('.industry.fn-right'):
                _job.industry = j.string
            jobs.append(_job)
    return jobs
Example #12
    def sortJobs(self, Jobs):
        '''
        This takes a list of job references from the server and filters them
        to get the new ones. From there it passes them to the local job
        objects to be managed.
        '''
        newjobs = []
        for j in Jobs:
            if not (j in self.rawJobs):
                self.rawJobs.append(j)
                newjobs.append(j)

        for j in newjobs:
            self.jobs.append(job(self, j))

        if self.currentjob is None:
            self.startAJob()
Example #14
    def test_irregal_create(self):
        pt_error_string = 'pt must be greater than zero'
        with self.assertRaises(AssertionError) as context:
            job(0, 0)
        self.assertTrue(pt_error_string in str(context.exception))

        rd_error_string = 'rd must be greater than or equal to zero.'
        with self.assertRaises(AssertionError) as context:
            job(1, -1)
        self.assertTrue(rd_error_string in str(context.exception))

        pt_int_error_string = 'pt must be int'
        with self.assertRaises(AssertionError) as context:
            job(1.1, 0)
        self.assertTrue(pt_int_error_string in str(context.exception))

        rd_int_error_string = 'rd must be int'
        with self.assertRaises(AssertionError) as context:
            job(1, 0.2)
        self.assertTrue(rd_int_error_string in str(context.exception))
Example #15
    def process_job_list(self, data):
        ret = data.data
        lines = ret.decode("utf-8").split("\n")
        self.jobs = []
        for i in range(1, len(lines)):
            act = lines[i].split()
            if len(act) > 1:
                j = job()
                j.name = act[1]
                j.path = act[4]
                j.ip = act[5]
                j.start = act[7]
                j.stop = act[9]
                j.cpus = int(act[10])
                j.status = int(act[3])
                self.jobs.append(j)
                #print(act[0], act[1], act[2], act[3],act[4], act[5], act[6], act[7])
                #self.cluster_jobs.append([act[0], act[1], act[2], act[3],act[4], act[5], act[6], act[7]])

        self.jobs_update.emit()
Example #16
    clusterName = None

# The user will provide the data in the JSON format.
# Call the function from_json to extract the contents from JSON file
Config_ = Config()
resourceObj, subScheduler = Config_.from_json("config.json", clusterName)

Scheduler_ = scheduler()

if subScheduler == "PBS":
    subScheduler = PBS(Scheduler_)
elif subScheduler == "Condor":
    subScheduler = Condor(Scheduler_)

# Job object is created with initializing the parameters
Job_ = job(0, 0, inputScriptFile, resourceObj.remoteTmp, inpFiles, outFiles)
output_file = "map_jobid.csv"

# Creates the pickle object for each Job. This stores the complete information
# of the Job object.
if not os.path.isfile(output_file):
    filename = 'pickle_1'
else:
    with open(output_file, 'r+') as csvfile:
        reader = csv.reader(csvfile)
        for row in reader:
            key = row[0]
            key = int(key)
            key += 1
            filename = 'pickle_' + str(key)
            break
Example #17
datadirname = sys.argv[4]  # Name of data directory
plotdirname = sys.argv[5]  # Name of plot directory

trajdotlammpstrj = sys.argv[6]  # Trajectories
testdotout = sys.argv[7]  # LAMMPS print to screen
depdotin = sys.argv[8]  # Input file

edges = int(sys.argv[9])  # The edges of interest
faces = int(sys.argv[10])  # The face threshold for the edges of interest

# Loop for each path
for item in os.walk(jobs_dir):

    path = item[0]

    split = path.split('/')

    # Filter for paths that contain jobs
    if job_name not in split[-1]:
        continue

    run = job(path, export_dir, datadirname, plotdirname)

    run.input_file(depdotin)
    run.sys(testdotout)
    run.box(trajdotlammpstrj)

    run.ico(edges=edges, faces=faces)

    print('-'*79)
Example #18
	def run_hetero(self, t, special_input=None):
		# run name
		runname="%s, N=%i, M=%i, ni=%f, ncr=%f, dW=%f, T=%f" % (self.material, self.N, self.M, self.ni, self.ncr, self.dW, t)
		self.write_log("##############################################\n")
		self.write_log("### euorun: %s\n" % runname)
		self.write_log("##############################################\n")
		# run command
		runcmd=self.mpicmd + " -np %i " % self.np
		runcmd+=self.sp.get_runcmd_hetero(self.material, self.N, self.M, self.ni, self.ncr, self.dW, t)
		# add additional parameter
		runcmd+=self.iteration_parameter
		# add output
		runoutput=self.output + self.hdb.get_temp_output(t)
		runcmd+=" -o %s/" % runoutput

		#print "check", runcmd
		# check that the run does not already exist
		runexists=self.run_exists(runcmd, runoutput, check_database=self.check_database)
		if not runexists:
			# add special input folder
			if special_input is not None:
				runcmd+=" -i %s" % (special_input)
			# search self.input folder and/or remote database for suitable input folders and add it
			else:
				if self.inputFlag:
					runcmd=database.add_input(runcmd, download_path=self.output+"/download/", path=self.input, source=self.source, input_system_name=self.input_system_name)

			if self.isoDeltaFlag:
				######################################################################################
				####### add energy shift values for the isolated system constituents #################
				######################################################################################
				# check whether energy-shift values for the isolated systems already exist
				#print "check isodeltas:",  database.get_isodelta_info(runcmd)
				self.write_log("* Check isolated deltas: %s, %s, %s, %s, %s, %s, %s, %s\n"  % (database.get_isodelta_info(runcmd)[:-1]))
				(exists_left, material_left, N_left, nc_left, exists_right, material_right, N_right, nc_right, temp)=database.get_isodelta_info(runcmd)

				# if not start isolated runs
				if not exists_left or not exists_right:
					if not exists_left:
						# get name
						runname_left="%s, N=%i, ni=%f, T=%f" % (material_left, N_left, nc_left, t)
						self.write_log("* Isolated run necessary: %s\n" % runname_left)
						# get run command
						runcmd_left=self.mpicmd + " -np %i " % self.np
						runcmd_left+=self.sp.get_runcmd_isolated(material_left, N_left, nc_left, t)
						# add default additional parameter for iteration 
						runcmd_left+=self.get_default_iteration_parameter(material_left)
						# add output
						output_left=self.idb.get_output(material_left, N_left, nc_left)
						runoutput_left=output_left + self.idb.get_temp_output(t)
						runcmd_left+=" -o " + runoutput_left
						# run left system
						if not self.run_exists(runcmd_left, runoutput_left):
							# add input if existent
							runcmd_left=database.add_input(runcmd_left, download_path=output_left+"/download/", path=output_left, source=self.source)
							j=job.job(runname_left, self.log, self.email, [runcmd_left], logappend=True, verbose=self.verbose, mailcmd=self.mailcmd)
							j.run()
						# update database
						self.write_log("* Update isolated database\n")
						#print "update isolated db"
						updatecmd_left="isolated_remote.py %s" % output_left
						#subprocess.call(updatecmd_left, shell=True)
						j=job.job("update remote isolated database" , self.log, self.email, [updatecmd_left], logappend=True, verbose=False, mailcmd=self.mailcmd)
						j.run()
		
					if not exists_right:
						# get name
						runname_right="%s, N=%i, ni=%f, T=%f" % (material_right, N_right, nc_right, t)
						self.write_log("* Isolated run necessary: %s\n" % runname_right)
						# get run command
						runcmd_right=self.mpicmd + " -np %i " % self.np
						runcmd_right+=self.sp.get_runcmd_isolated(material_right, N_right, nc_right, t)
						# add default additional parameter for iteration 
						runcmd_right+=self.get_default_iteration_parameter(material_right)
						# add output
						output_right=self.idb.get_output(material_right, N_right, nc_right)
						runoutput_right=output_right + self.idb.get_temp_output(t)
						runcmd_right+=" -o " + runoutput_right
						# run right system
						if not self.run_exists(runcmd_right, runoutput_right):
							# add input if existent
							runcmd_right=database.add_input(runcmd_right, download_path=output_right+"/download/", path=output_right, source=self.source)
							j=job.job(runname_right, self.log, self.email, [runcmd_right], logappend=True, verbose=self.verbose, mailcmd=self.mailcmd)
							j.run()
						# update database
						#print "update isolated db"
						self.write_log("* Update isolated database\n")
						updatecmd_right="isolated_remote.py  %s" % output_right
						#subprocess.call(updatecmd_right, shell=True)
						j=job.job("update remote isolated database" , self.log, self.email, [updatecmd_right], logappend=True, verbose=False, mailcmd=self.mailcmd)
						j.run()
		
		
				# add isodeltas
				runcmd=database.add_isodeltas(runcmd)

			# run heterostructure job
			#print "run", runcmd
			j=job.job(runname, self.log, self.email, [runcmd], logappend=True, verbose=self.verbose, mailcmd=self.mailcmd)
			j.run()

			# update database
			if self.updatedbFlag and not runexists:
				self.write_log("* Update heterostructure database\n")
				#print "update heterostructure db"
				updatecmd="heterostructure_remote.py %s" % runoutput
				#subprocess.call(updatecmd, shell=True)
				j=job.job("update remote heterostructure database", self.log, self.email, [updatecmd], logappend=True, verbose=False, mailcmd=self.mailcmd)
				j.run()

		self.write_log("\n")
Example #19
MODULENAME = "MAIN"


#Main file of our app
def custom_job(app, arg):
    i = 0
    while app.running:
        config.log.log(str(i))
        i = i + 1
        if i > 5:
            break
        time.sleep(5)


def main(app):  # main function
    while app.running:  # IMPORTANT: the life cycle should continue only while the app is running, otherwise the app runs forever!
        s = input(">")
        if s == "help":
            print(
                "example--example showcase\nexit--quit app\nhelp--show this message"
            )
        elif s == "exit":
            app.event(Event("$APP_QUIT", app, {"reason": "user_exit"}))
        else:
            app.event(Event("UI_COMMAND_SENT", app, {"cmd": s}))


if __name__ == "__main__":  # when run as a script rather than imported as a module
    App = app("app.log", main, [job(custom_job, (10, ))])  #creating app
    App.run()  #and running it
Example #20
File: main.py  Project: balqui/slatt
from job import job
from sys import exit

# CAVEAT: CHARACTER '/' IS ASSUMED NOT TO OCCUR AT ALL IN THE DATASET

# EXAMPLES OF USE OF THE job CLASS FOR RUNNING SLATT

# use Borgelt's apriori to compute all frequent closures
# for a dataset and a support bound (in [0,1]):
# items may be strings, not just numbers
todayjob = job("lenses_recoded", 0.001)
##todayjob = job("len",0.001)
##todayjob = job("pvotes",0.30)

##todayjob = job("pumsb_star",supp=0.4)
##todayjob = job("toyNouRay",supp=0)
##anotherjob = job("e13",0.99/13)

# compute the B* basis (with these flags the rules are neither shown nor written to file)
todayjob.run("B*", 0.75, show=False, outrules=False)

# compute representative rules and write them to file
todayjob.run("RR", 0.75, show=False, outrules=True)

# compute GD basis for conf 1
todayjob.run("GD", show=False)

# apply the confidence boost filter at level 1.2 to RR
todayjob.run("RR", 0.75, boost=1.2, show=True)

# now to B*, at boost 1.05, reducing the output verbosity a bit
Example #21
    def run_hetero(self, t, special_input=None):
        # run name
        runname = "%s, N=%i, M=%i, ni=%f, ncr=%f, dW=%f, T=%f" % (
            self.material, self.N, self.M, self.ni, self.ncr, self.dW, t)
        self.write_log("##############################################\n")
        self.write_log("### euorun: %s\n" % runname)
        self.write_log("##############################################\n")
        # run command
        runcmd = self.mpicmd + " -np %i " % self.np
        runcmd += self.sp.get_runcmd_hetero(self.material, self.N, self.M,
                                            self.ni, self.ncr, self.dW, t)
        # add additional parameter
        runcmd += self.iteration_parameter
        # add output
        runoutput = self.output + self.hdb.get_temp_output(t)
        runcmd += " -o %s/" % runoutput

        #print "check", runcmd
        # check that the run does not already exist
        runexists = self.run_exists(runcmd,
                                    runoutput,
                                    check_database=self.check_database)
        if not runexists:
            # add special input folder
            if special_input is not None:
                runcmd += " -i %s" % (special_input)
            # search self.input folder and/or remote database for suitable input folders and add it
            else:
                if self.inputFlag:
                    runcmd = database.add_input(
                        runcmd,
                        download_path=self.output + "/download/",
                        path=self.input,
                        source=self.source,
                        input_system_name=self.input_system_name)

            if self.isoDeltaFlag:
                ######################################################################################
                ####### add energy shift values for the isolated system constituents #################
                ######################################################################################
                # check whether energy-shift values for the isolated systems already exist
                #print "check isodeltas:",  database.get_isodelta_info(runcmd)
                self.write_log(
                    "* Check isolated deltas: %s, %s, %s, %s, %s, %s, %s, %s\n"
                    % (database.get_isodelta_info(runcmd)[:-1]))
                (exists_left, material_left, N_left, nc_left, exists_right,
                 material_right, N_right, nc_right,
                 temp) = database.get_isodelta_info(runcmd)

                # if not start isolated runs
                if not exists_left or not exists_right:
                    if not exists_left:
                        # get name
                        runname_left = "%s, N=%i, ni=%f, T=%f" % (
                            material_left, N_left, nc_left, t)
                        self.write_log("* Isolated run necessary: %s\n" %
                                       runname_left)
                        # get run command
                        runcmd_left = self.mpicmd + " -np %i " % self.np
                        runcmd_left += self.sp.get_runcmd_isolated(
                            material_left, N_left, nc_left, t)
                        # add default additional parameter for iteration
                        runcmd_left += self.get_default_iteration_parameter(
                            material_left)
                        # add output
                        output_left = self.idb.get_output(
                            material_left, N_left, nc_left)
                        runoutput_left = output_left + self.idb.get_temp_output(
                            t)
                        runcmd_left += " -o " + runoutput_left
                        # run left system
                        if not self.run_exists(runcmd_left, runoutput_left):
                            # add input if existent
                            runcmd_left = database.add_input(
                                runcmd_left,
                                download_path=output_left + "/download/",
                                path=output_left,
                                source=self.source)
                            j = job.job(runname_left,
                                        self.log,
                                        self.email, [runcmd_left],
                                        logappend=True,
                                        verbose=self.verbose,
                                        mailcmd=self.mailcmd)
                            j.run()
                        # update database
                        self.write_log("* Update isolated database\n")
                        #print "update isolated db"
                        updatecmd_left = "isolated_remote.py %s" % output_left
                        #subprocess.call(updatecmd_left, shell=True)
                        j = job.job("update remote isolated database",
                                    self.log,
                                    self.email, [updatecmd_left],
                                    logappend=True,
                                    verbose=False,
                                    mailcmd=self.mailcmd)
                        j.run()

                    if not exists_right:
                        # get name
                        runname_right = "%s, N=%i, ni=%f, T=%f" % (
                            material_right, N_right, nc_right, t)
                        self.write_log("* Isolated run necessary: %s\n" %
                                       runname_right)
                        # get run command
                        runcmd_right = self.mpicmd + " -np %i " % self.np
                        runcmd_right += self.sp.get_runcmd_isolated(
                            material_right, N_right, nc_right, t)
                        # add default additional parameter for iteration
                        runcmd_right += self.get_default_iteration_parameter(
                            material_right)
                        # add output
                        output_right = self.idb.get_output(
                            material_right, N_right, nc_right)
                        runoutput_right = output_right + self.idb.get_temp_output(
                            t)
                        runcmd_right += " -o " + runoutput_right
                        # run right system
                        if not self.run_exists(runcmd_right, runoutput_right):
                            # add input if existent
                            runcmd_right = database.add_input(
                                runcmd_right,
                                download_path=output_right + "/download/",
                                path=output_right,
                                source=self.source)
                            j = job.job(runname_right,
                                        self.log,
                                        self.email, [runcmd_right],
                                        logappend=True,
                                        verbose=self.verbose,
                                        mailcmd=self.mailcmd)
                            j.run()
                        # update database
                        #print "update isolated db"
                        self.write_log("* Update isolated database\n")
                        updatecmd_right = "isolated_remote.py  %s" % output_right
                        #subprocess.call(updatecmd_right, shell=True)
                        j = job.job("update remote isolated database",
                                    self.log,
                                    self.email, [updatecmd_right],
                                    logappend=True,
                                    verbose=False,
                                    mailcmd=self.mailcmd)
                        j.run()

                # add isodeltas
                runcmd = database.add_isodeltas(runcmd)

            # run heterostructure job
            #print "run", runcmd
            j = job.job(runname,
                        self.log,
                        self.email, [runcmd],
                        logappend=True,
                        verbose=self.verbose,
                        mailcmd=self.mailcmd)
            j.run()

            # update database
            if self.updatedbFlag and not runexists:
                self.write_log("* Update heterostructure database\n")
                #print "update heterostructure db"
                updatecmd = "heterostructure_remote.py %s" % runoutput
                #subprocess.call(updatecmd, shell=True)
                j = job.job("update remote heterostructure database",
                            self.log,
                            self.email, [updatecmd],
                            logappend=True,
                            verbose=False,
                            mailcmd=self.mailcmd)
                j.run()

        self.write_log("\n")
Example #22
def genjobs():
    nin = job('NIN', 'DPS', True, 2.4, .7, .15, True)
    drg = job('DRG', 'DPS', True, 2.4, .7, .15, False)
    mnk = job('MNK', 'DPS', False, 2.4, .7, .15, False)
    sam = job('SAM', 'DPS', False, 2.4, .7, .15, False)

    brd = job('BRD', 'DPS', False, 2.4, 0, .15, False)
    mch = job('MCH', 'DPS', False, 2.4, 0, .15, False)

    rdm = job('RDM', 'DPS', True, 2.4, 0, .15, False)
    smn = job('SMN', 'DPS', False, 2.4, 0, .15, False)
    blm = job('BLM', 'DPS', False, 2.4, 0, .15, False)

    sch = job('SCH', 'HEAL', True, 2.4, 0, .15, False)
    ast = job('AST', 'HEAL', True, 2.4, 0, .15, False)
    whm = job('WHM', 'HEAL', False, 2.4, 0, .15, False)

    gnb = job('GNB', 'TANK', True, 2.4, 0, .15, False)
    war = job('WAR', 'TANK', True, 2.4, 0, .15, False)
    drk = job('DRK', 'TANK', False, 2.4, 0, .15, False)
    pld = job('PLD', 'TANK', False, 2.4, 0, .15, False)

    table = [
        nin, drg, mnk, sam, brd, mch, rdm, smn, blm, sch, ast, whm, gnb, war,
        drk, pld
    ]

    jobs_by_name = {}
    for i in table:
        jobs_by_name[i.name] = i

    return jobs_by_name
Example #23
def test(target=None, t=None, cluster='.*', c=None, fabrun='.*', f=None,
         pipeline_pattern='.*', p='.*', fablib=None, l=None):
    target = t if t is not None else target
    cluster = c if c is not None else cluster
    re_cluster = re.compile(cluster)
    fabrun = f if f is not None else fabrun
    fablib = l if l is not None else fablib
    pipeline_pattern = p if p is not None else pipeline_pattern

    if target is None and fablib is None:
        print 'Bad args.'
        print 'Please set test target (t=xxx) or fablib target (l=yyy).'
        print test.__doc__
        return

    sys.path.remove(CONF._repo_dir)

    FABTEST_DIR = os.path.dirname(os.path.abspath(__file__))
    if fablib is not None:
        fablib_dir = os.path.join(CONF._fablib_module_dir, fablib)
        CONF._repo_dir = os.path.join(fablib_dir, 'test-repo')
    else:
        CONF._repo_dir = os.path.join(FABTEST_DIR, 'test-repo')

    EX_CONF = cfg.ConfigOpts()
    conf_file = os.path.join(CONF._repo_dir, 'fabfile.ini')
    if os.path.exists(conf_file):
        EX_CONF([], default_config_files=[conf_file])
    else:
        EX_CONF([])

    from fabkit.conf import conf_base, conf_fabric, conf_web, conf_test  # noqa
    EX_CONF.register_opts(conf_base.default_opts)
    EX_CONF.register_opts(conf_test.test_opts, group="test")

    CONF._storage_dir = os.path.join(CONF._repo_dir, 'storage')
    CONF._databag_dir = os.path.join(CONF._repo_dir, 'databag')
    CONF._tmp_dir = os.path.join(CONF._storage_dir, 'tmp')
    CONF._log_dir = os.path.join(CONF._storage_dir, 'log')
    CONF._node_dir = os.path.join(CONF._repo_dir, 'nodes')
    CONF._node_meta_pickle = os.path.join(CONF._node_dir, 'meta.pickle')
    CONF._fabscript_module_dir = os.path.join(CONF._repo_dir, 'fabscript')
    CONF._fablib_module_dir = os.path.join(CONF._repo_dir, 'fablib')

    sys.path.extend([
        CONF._repo_dir,
    ])

    CONF.fablib = EX_CONF.fablib
    CONF.test.fablib = EX_CONF.test.fablib
    CONF.test.clusters = EX_CONF.test.clusters

    util.create_required_dirs()
    util.git_clone_required_fablib(is_test=True)

    CONF._unittests_dir = os.path.join(FABTEST_DIR, 'unittests')

    if target is None or target == 'fab':
        CONF.user = CONF.test.user
        CONF.password = CONF.test.password
        conf_fabric.init()

        env.forward_agent = False
        env.disable_known_hosts = True

        env.tasks.append('node:{0}'.format(cluster))
        env.tasks.append("job:local,'{0}'".format(pipeline_pattern))

        for cluster in CONF.test.clusters:
            if re_cluster.search(cluster):
                node(cluster, 'yes')
                job('local', pipeline_pattern, f=fabrun)

    if fablib is None:
        # Test fabkit
        if target == 'all':
            suites = unittest.TestLoader().discover(CONF._unittests_dir,
                                                    pattern='test_*')
        else:
            suites = unittest.TestLoader().discover(CONF._unittests_dir,
                                                    pattern='test_{0}*'.format(target))

        alltests = unittest.TestSuite(suites)
        result = unittest.TextTestRunner(verbosity=2).run(alltests)

        exit(len(result.errors) + len(result.failures))
Example #24
    def init(NUM_INSTANCE=0):
        constantes.NUM_INSTANCE = NUM_INSTANCE
        ## path to the instances
        constantes.INSTANCE = constantes.LISTE_INSTANCE[NUM_INSTANCE]
        # load the data class matching the instance name
        data = imp.load_source(
            constantes.INSTANCE,
            constantes.PATH_INSTANCE + constantes.INSTANCE + ".py")
        data_2 = data.data
        ## build the timestamp header of the log file
        time_temp = time.ctime()
        header_time = (time_temp.split(":")[0]).split(" ")
        try:
            header_time.remove("")
        except ValueError:
            pass
        header_time.append(time_temp.split(":")[1])
        header_time2 = ""
        header_time2 += header_time[0] + "_" + header_time[
            1] + "_" + header_time[2] + "_"
        header_time2 += header_time[3] + ":" + header_time[4] + "_"
        constantes.header_time = header_time2
        ###############################################################################
        ######################### instance constants ##################################
        ## extract the constants
        constantes.nbJOB = data_2["nbJOB"]
        constantes.tics = data_2["tics"]
        constantes.nbTIC = data_2["nbTIC"]
        constantes.speed = data_2["speed"]
        constantes.nbCMP = data_2["nbCMP"]
        constantes.name = data_2["name"]
        constantes.jobs = data_2["jobs"]

        ###############################################################################
        ######################### initialise the object lists #########################
        ## list of technicians
        #print('*** building the tic list')
        constantes.TICS = [tic(t) for t in constantes.tics]
        #for t in TICS: print(t)

        ## list of jobs
        #print('*** building the job list')
        constantes.JOBS = [job(j) for j in constantes.jobs]
        #for j in JOBS: print(j)

        ## tic dict for fast lookup
        constantes.dico_tic = {}
        for t in constantes.TICS:
            constantes.dico_tic[t.id] = t
        tic.dico_tic = constantes.dico_tic

        ## job dict for fast lookup
        constantes.dico_job = {}
        for j in constantes.JOBS:
            constantes.dico_job[j.id] = j
        job.dico_job = constantes.dico_job

        # job.speed
        job.speed = constantes.speed

        ###############################################################################
        ######################### computed constants ##################################
        ## distance between two jobs
        constantes.distance = {}
        for job1 in constantes.JOBS:
            for job2 in constantes.JOBS:
                constantes.distance[(job1.id,
                                     job2.id)] = job.distance(job1, job2)

        ## travel time between two jobs
        constantes.temps = {}
        for job1 in constantes.JOBS:
            for job2 in constantes.JOBS:
                constantes.temps[(job1.id, job2.id)] = job.temps(job1, job2)
Example #25
def execute(p):
    j = None
    if len(p.commands) > 1:

        # iterate over all the commands that are piped
        for i in range(len(p.commands)):

            # first command in the pipeline
            if i == 0:
                proc = None

                # checks if there is input redirection
                if p.input != "":
                    fd = os.open(p.input, os.O_RDONLY)
                    proc = subprocess.Popen(p.commands[i],
                                            preexec_fn=os.setpgrp,
                                            stdin=fd,
                                            stdout=subprocess.PIPE)
                # else runs it normally
                else:
                    proc = subprocess.Popen(p.commands[i],
                                            preexec_fn=os.setpgrp,
                                            stdout=subprocess.PIPE)
                j = joblist.add_job(p.fg)
                j.add_process(proc)
            # deals with last command in pipeline
            elif i == len(p.commands) - 1:
                in_proc = j.processes[i - 1]
                proc = None
                # checks for output redirection and deals with it accordingly
                if p.output != "":
                    fd_out = os.open(p.output, os.O_CREAT | os.O_WRONLY)
                    proc = subprocess.Popen(p.commands[i],
                                            preexec_fn=os.setpgrp,
                                            stdin=in_proc.subprocess.stdout,
                                            stdout=fd_out)
                # else runs last command normally
                else:
                    proc = subprocess.Popen(p.commands[i],
                                            preexec_fn=os.setpgrp,
                                            stdin=in_proc.subprocess.stdout)
                j.add_process(proc)
            # if command is not last or first in the pipeline
            else:
                in_proc = j.processes[i - 1]
                proc = subprocess.Popen(p.commands[i],
                                        preexec_fn=os.setpgrp,
                                        stdin=in_proc.subprocess.stdout,
                                        stdout=subprocess.PIPE)
                j.add_process(proc)
    else:
        # creates singular process within a job
        proc = None
        # both input and output redirection case
        if p.input != "" and p.output != "":
            fd_in = os.open(p.input, os.O_RDONLY)
            fd_out = os.open(p.output, os.O_CREAT | os.O_WRONLY)
            proc = subprocess.Popen(p.commands[0],
                                    preexec_fn=os.setpgrp,
                                    stdin=fd_in,
                                    stdout=fd_out)

        # just input redirection
        elif p.input != "":
            fd = os.open(p.input, os.O_RDONLY)
            proc = subprocess.Popen(p.commands[0],
                                    preexec_fn=os.setpgrp,
                                    stdin=fd)

        # just output redirection
        elif p.output != "":
            fd_out = os.open(p.output, os.O_CREAT | os.O_WRONLY)
            proc = subprocess.Popen(p.commands[0],
                                    preexec_fn=os.setpgrp,
                                    stdout=fd_out)

        # no input or output redirection
        else:
            proc = subprocess.Popen(p.commands[0], preexec_fn=os.setpgrp)
        j = job(p.fg)
        j.add_process(proc)
        joblist.jobs.append(j)

    while joblist.has_fg_job():
        # repeatedly checking if any child process has finished
        time.sleep(.01)
        child_handler()
Example #26
def test(target=None,
         t=None,
         cluster='.*',
         c=None,
         fabrun='.*',
         f=None,
         pipeline_pattern='.*',
         p='.*',
         fablib=None,
         l=None):
    target = t if t is not None else target
    cluster = c if c is not None else cluster
    re_cluster = re.compile(cluster)
    fabrun = f if f is not None else fabrun
    fablib = l if l is not None else fablib
    pipeline_pattern = p if p is not None else pipeline_pattern

    if target is None and fablib is None:
        print 'Bad args.'
        print 'Please set test target (t=xxx) or fablib target (l=yyy).'
        print test.__doc__
        return

    sys.path.remove(CONF._repo_dir)

    FABTEST_DIR = os.path.dirname(os.path.abspath(__file__))
    if fablib is not None:
        fablib_dir = os.path.join(CONF._fablib_module_dir, fablib)
        CONF._repo_dir = os.path.join(fablib_dir, 'test-repo')
    else:
        CONF._repo_dir = os.path.join(FABTEST_DIR, 'test-repo')

    EX_CONF = cfg.ConfigOpts()
    conf_file = os.path.join(CONF._repo_dir, 'fabfile.ini')
    if os.path.exists(conf_file):
        EX_CONF([], default_config_files=[conf_file])
    else:
        EX_CONF([])

    from fabkit.conf import conf_base, conf_fabric, conf_web, conf_test  # noqa
    EX_CONF.register_opts(conf_base.default_opts)
    EX_CONF.register_opts(conf_test.test_opts, group="test")

    CONF._storage_dir = os.path.join(CONF._repo_dir, 'storage')
    CONF._databag_dir = os.path.join(CONF._repo_dir, 'databag')
    CONF._tmp_dir = os.path.join(CONF._storage_dir, 'tmp')
    CONF._log_dir = os.path.join(CONF._storage_dir, 'log')
    CONF._node_dir = os.path.join(CONF._repo_dir, 'nodes')
    CONF._node_meta_pickle = os.path.join(CONF._node_dir, 'meta.pickle')
    CONF._fabscript_module_dir = os.path.join(CONF._repo_dir, 'fabscript')
    CONF._fablib_module_dir = os.path.join(CONF._repo_dir, 'fablib')

    sys.path.extend([
        CONF._repo_dir,
    ])

    CONF.fablib = EX_CONF.fablib
    CONF.test.fablib = EX_CONF.test.fablib
    CONF.test.clusters = EX_CONF.test.clusters

    util.create_required_dirs()
    util.git_clone_required_fablib(is_test=True)

    CONF._unittests_dir = os.path.join(FABTEST_DIR, 'unittests')

    if target is None or target == 'fab':
        CONF.user = CONF.test.user
        CONF.password = CONF.test.password
        conf_fabric.init()

        env.forward_agent = False
        env.disable_known_hosts = True

        env.tasks.append('node:{0}'.format(cluster))
        env.tasks.append("job:local,'{0}'".format(pipeline_pattern))

        for cluster in CONF.test.clusters:
            if re_cluster.search(cluster):
                node(cluster, 'yes')
                job('local', pipeline_pattern, f=fabrun)

    if fablib is None:
        # Test fabkit
        if target == 'all':
            suites = unittest.TestLoader().discover(CONF._unittests_dir,
                                                    pattern='test_*')
        else:
            suites = unittest.TestLoader().discover(
                CONF._unittests_dir, pattern='test_{0}*'.format(target))

        alltests = unittest.TestSuite(suites)
        result = unittest.TextTestRunner(verbosity=2).run(alltests)

        exit(len(result.errors) + len(result.failures))
Example #27
###############################################################################
"""
Execution
"""
###############################################################################
np.random.seed(RANDOM_SEED)  # this makes the results reproducible
# Create an environment and start the setup process
jobs_list = {}
queue_count = {}
env = simpy.Environment()
ComputeMachine = CommmonMachine(env, NUM_CPUS, CPU_SLOT, CPU_OVERHEAD)
# Initialize first (num_terminals) jobs
for i in range(NUM_TERMINALS):
    temp = job(env, job_id = i+1, service_time = np.random.exponential(0.8))
    env.process(request_cpu(temp, ComputeMachine))
    jobs_list[i+1] = temp
env.run()
##############################################################################
# statistics
response_mean, response_max, response_array = average_response(jobs_list)
cum_queue = queue_time_average(queue_count)
cpu_util = cpu_utilization(ComputeMachine, jobs_list)
Example #28
 def test_srpt_2(self):
     jobs = [job(2, 1), job(1, 4), job(4, 1)]
     ref = [[[1, 3]], [[3, 4], [5, 8]], [[4, 5]]]
     sched = srpt(jobs)
     for i, j in enumerate(sched.jobs):
         self.assertEqual(j.ct, ref[i])
Example #29
def main():
    logger.info("Archiveboxmatic is running.")
    args = parse_args()
    config = read_config(args.config_file)
    # Check if config is valid with the selected arguments.
    if not validate_config(config, args):
        raise Exception("Config is not valid.")

    allowed_schedules = ([args.schedule] if args.schedule != "all" else
                         ["daily", "weekly", "monthly", "yearly", "none"])

    global_config = config["archivebox"]

    def run_threaded(job_func, args, global_config, archive_config):
        job_thread = threading.Thread(
            target=job_func, args=[args, global_config, archive_config])
        job_thread.start()

    for i in config["archives"]:
        if "schedule" in i:
            if i["schedule"] in allowed_schedules:
                if i["schedule"] == "daily":
                    schedule.every().day.at("12:00").do(
                        run_threaded,
                        job,
                        args=args,
                        global_config=global_config,
                        archive_config=i,
                    )
                elif i["schedule"] == "weekly":
                    schedule.every().monday.at("10:00").do(
                        run_threaded,
                        job,
                        args=args,
                        global_config=global_config,
                        archive_config=i,
                    )
                elif i["schedule"] == "monthly":
                    schedule.every().day.at("05:00").do(
                        run_threaded,
                        job_monthly,
                        args=args,
                        global_config=global_config,
                        archive_config=i,
                    )
                elif i["schedule"] == "yearly":
                    schedule.every().day.at("01:00").do(
                        run_threaded,
                        job_yearly,
                        args=args,
                        global_config=global_config,
                        archive_config=i,
                    )
                else:
                    job(args, global_config, i)
            else:
                logger.warning(f"Schedule {i['schedule']} not allowed.")
        elif "none" in allowed_schedules:
            job(args, global_config, i)
        else:
            logger.warning("Schedule none not allowed.")

    while True:
        schedule.run_pending()
        time.sleep(600)
        logger.debug(
            f"Next job: {schedule.next_run() - datetime.datetime.now()}.")
Example #30
 def test_srpt_text_jobs(self):
     jobs = [job(2, 0), job(1, 4), job(4, 1)]
     ref = [[[0, 2]], [[2, 4], [5, 7]], [[4, 5]]]
     sched = srpt(jobs)
     for i, j in enumerate(sched.jobs):
         self.assertEqual(j.ct, ref[i])
Example #31
 def add_job(self, fg):
     self.jobs.append(job(fg))
     return self.jobs[len(self.jobs) - 1]
Example #32
def run_submit(cmd, logstring, append, email, mailcmd):
	cmds=[cmd]
	j=job.job(logstring, logstring, email, cmds, logappend=append, verbose=True, mailcmd=mailcmd)
	j.run()
Example #33
from job import job
from sys import exit

# CAVEAT: CHARACTER '/' IS ASSUMED NOT TO OCCUR AT ALL IN THE DATASET

# EXAMPLES OF USE OF THE job CLASS FOR RUNNING SLATT
# FOR REPRESENTATIVE RULES MINING

# use Borgelt's apriori to compute all frequent closures
# for a dataset and a support bound (in [0,1]):
# items may be strings, not just numbers
todayjob = job("./datasets/test",0.4)
##todayjob = job(".datasets/retail",0.0005)
##todayjob = job(".datasets/retail",0.001)
##todayjob = job(".datasets/adult",0.01)
##todayjob = job(".datasets/adult",0.005)
##todayjob = job(".datasets/accidents",0.5)
##todayjob = job(".datasets/accidents",0.4)


# compute representative rules with Kryszkiewicz incomplete heuristic,
# write the rules into a file 
todayjob.run("GenRR",0.7,show=False,outrules=True)

# compute B* basis, show in console and do not write on file 
todayjob.run("RRGenerator",0.8,show=True,outrules=False)

# compute representative rules, show in console and write on file
todayjob.run("RRClosureGenerator",0.9,show=True,outrules=True)

Example #34
    path = item[0]

    # Filter for paths that contain jobs
    if 'job' not in path:
        continue
    if datadirname in path:
        continue
    if plotdirname in path:
        continue
    if 'minimization' in path:
        continue

    error = False
    try:
        run = job(path)
    except Exception:
        error = True
        pass

    try:
        run.apd()
    except Exception:
        error = True
        pass

    try:
        run.etg()
    except Exception:
        error = True
        pass
Example #35
                else:
                    j = heapq.heappop(self.__stack)
                    j.ct.append([self.__t, self.__t + j.pt])
                    self.__t += j.pt
                self.__stacking()
        self.__print()

    def draw(self, fname=None):
        for j in self.jobs:
            j.draw()
        plt.gca().set_ylim([0, 2])
        plt.gca().margins(0.1)
        if fname:
            plt.savefig(fname, bbox_inches='tight')
        else:
            plt.show()


if __name__ == '__main__':
    jobs = []
    c = mc.values()
    n = len(c) - 1
    random.seed(0)
    jobs.append(job(2, 0, c[random.randint(0, n)]))
    jobs.append(job(1, 4, c[random.randint(0, n)]))
    jobs.append(job(4, 1, c[random.randint(0, n)]))
    sched = srpt(jobs)
    for j in sched.jobs:
        print j.ct
    sched.draw('srpt_for_js1.png')