def read_trace(filename, numplayers):
    jobs = []  # using systems terminology
    f = open(filename)
    lines = f.readlines()
    numlines = len(lines)
    jobid = 1
    lineno = 0
    while lineno < numlines:
        parts = lines[lineno].strip().split(":")
        if parts[0].strip() != "j":
            print " Something wrong with the input file"
            exit()
        starttime = int(parts[1].strip())
        numtasks = int(parts[2].strip())
        j = Job(jobid, starttime, numtasks)
        j.add_tasks(lines[lineno + 1: lineno + numtasks + 1])
        jobs.append(j)
        jobid += 1
        lineno += (numtasks + 1)
        if jobid > numplayers:
            break
    print "Finished reading jobs "
    for job in jobs:
        (avgmem, stdmem) = job.get_agg_mem()
        (avgcpu, stdcpu) = job.get_agg_cpu()
        # print job.id, job.numtasks, int(avgmem / (1024 * 1024)), int(stdmem / (1024 * 1024)), job.get_agg_cpu()[0], job.get_duration()
    f.close()
    return jobs
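# A sketch of the trace format read_trace appears to expect, inferred from the
# parsing above; the values and task-line contents are hypothetical. Each job
# begins with a "j : <starttime> : <numtasks>" header, followed by <numtasks>
# task lines that are handed to Job.add_tasks():
#
#   j : 0 : 2
#   <task line for job 1>
#   <task line for job 1>
#   j : 10 : 1
#   <task line for job 2>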
def search_job_by_deadline(jobs, start, end, cursor1, cursor2):
    # The '?' placeholders assume a pyodbc-style driver (use %s for pymssql);
    # the date range comes from the start/end parameters.
    cursor1.execute("""
        select a.Workno, g.GoodNo, g.GoodCd, i.GoodCd as 'RawMaterialCd', m4.Minorcd,
               a.OrderQty, a.DeliveryDate,
               --case when i.Class3 = '061038' then '단조' else 'HEX' end as Gubun,
               case when m3.MinorNm = 'Forging' then 0
                    when m3.MinorNm = 'Hex Bar' then 1
                    when m3.MinorNm = 'Round Bar' then 2
                    when m3.MinorNm = 'Square Bar' then 3
                    when m3.MinorNm = 'VALVE 선작업' then 4
               end as RawMaterialGubun,
               -- raw material size
               ISNULL(i.Size, 0) as RawMaterialSize,
               -- whether this is a LOK FITTING
               case when g.Class3 = '061001' then 'Y' else 'N' end as LOKFITTINGYN,
               -- machine assignment by semi-finished part number size for LOK FITTING
               case when g.Class3 = '061001'
                     and ((LEFT(REPLACE(g.GoodNo, RTRIM(m4.MinorNm), ''), 3) = '-1-')
                       or (LEFT(REPLACE(g.GoodNo, RTRIM(m4.MinorNm), ''), 3) = '-2-')
                       or (LEFT(REPLACE(g.GoodNo, RTRIM(m4.MinorNm), ''), 3) = '-3-')
                       or (LEFT(REPLACE(g.GoodNo, RTRIM(m4.MinorNm), ''), 4) = '-2M-')
                       or (LEFT(REPLACE(g.GoodNo, RTRIM(m4.MinorNm), ''), 4) = '-3M-')
                       or (LEFT(REPLACE(g.GoodNo, RTRIM(m4.MinorNm), ''), 4) = '-4M-'))
                    then 'Y' else 'N'
               end as LOKFITTINGSIZEYN,
               a.CloseDate, a.Credate, a.Moddate
        from TWorkreport_Han_Eng a
        inner join TGood g on a.Goodcd = g.GoodCd
        inner join TGood i on a.Raw_Materialcd = i.GoodCd
        left outer join TMinor m3 on i.Class3 = m3.MinorCd
        left outer join TMinor m4 on g.Class4 = m4.MinorCd
        where DeliveryDate between ? and ?
          and PmsYn = 'N'
          and ContractYn = '1'
          -- Forging, Hex Bar, Round Bar, Square Bar, VALVE pre-machining
          and i.Class3 in ('061038', '061039', '061040', '061048', '061126')
        order by DeliveryDate
        """, (start, end))
    row = cursor1.fetchone()
    while row:
        temp = Job.Job()
        temp.setWorkNo(row[0])
        temp.setGoodNo(row[1])
        temp.setGoodCd(row[2])
        goodcd = row[2]
        temp.setRawmaterialCd(row[3])
        temp.setOrderQty(row[5])
        temp.setDeliveryDate(row[6])
        temp.setRawmaterialGubun(row[7])
        temp.setSpec(row[8])
        temp.setLOKFitting(row[9])
        temp.setLOKFittingSize(row[10])
        temp.setCycletime(search_cycle_time(cursor2, goodcd))
        # only keep jobs that have a known cycle time
        if len(temp.getCycletime()) != 0:
            jobs.append(temp)
        row = cursor1.fetchone()
    total_number = len(jobs)
    print("Total number of jobs: %d" % total_number)
def start(self, count):
    print '\nFetching jobs from indeed.com'
    urlKeyword = "Computer Science"
    webURL = "http://www.indeed.com/jobs?q=" + urlKeyword + "&start="
    for page in range(1, 2):
        page = (page - 1) * 10
        url = "%s%d" % (webURL, page)
        target = Soup(urllib.urlopen(url), "html.parser")
        targetElements = target.findAll('div', attrs={'class': ' row result'})
        if targetElements == []:
            break
        for element in targetElements:
            try:
                # creating a job instance to store details like job title, company, address, JobLink
                job = Job()
                company = element.find('span', attrs={'class': 'company'})
                if company != None:
                    job.companyName = company.getText().strip()
                title = element.find('a', attrs={'class': 'turnstileLink'}).attrs['title']
                if title != None:
                    job.jobTitle = title.strip()
                addr = element.find('span', attrs={'class': 'location'})
                if addr != None:
                    job.address = addr.getText().strip()
                job.homeURL = "http://www.indeed.com"
                job.jobLink = "%s%s" % (job.homeURL, element.find('a').get('href'))
                skillsElement = element.find('span', attrs={'class': 'experienceList'})
                job.skills = self.utils.clean_process_summary(skillsElement)
                summaryElement = element.find('span', attrs={'class': 'summary'})
                job.summary = self.utils.clean_process_summary(summaryElement)
                if job.jobLink != "" and job.jobLink != None:
                    joburl = urllib.quote(job.jobLink.encode('utf8'), ':/')
                    joblinkTarget = Soup(urllib.urlopen(joburl), "html.parser")
                    summaryElement = joblinkTarget.find('span', attrs={'class': 'summary'})
                    job.summary.extend(self.utils.clean_process_summary(summaryElement))
                if job.jobTitle != None and job.jobLink != None:
                    self.jobsFetched.append(job)
                    job.id = count
                    count += 1
                    # job.printDetails()
            except Exception as e:
                print str(e)
                continue
    print "No. of jobs fetched: " + str(len(self.jobsFetched))
    print 'Fetching jobs from indeed.com completed.'
    return self.jobsFetched
def delete_job():
    if session.get("user_id") is None:
        return "You must be logged in to delete this job!"
    json_data = request.get_json()
    job_uuid = json_data["jobId"]
    Job.deleteJob(job_uuid)
    return "Deleted Job " + job_uuid
def load_jobs(self, filename):
    '''Loads jobs from the input file.'''
    with open(filename, 'r') as f:
        for line in f:
            # skip the header row
            if not line.startswith('JOB,PRIORITY,LENGTH'):
                data = line.rstrip().split(',')
                job = Job(data[0], int(data[1]), int(data[2]))
                self.__jobHeap.add((job.get_priority(), job.get_length()), job)
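# Example input file for load_jobs, inferred from the header check and the
# comma split above; the job names and numbers are hypothetical:
#
#   JOB,PRIORITY,LENGTH
#   job1,3,120
#   job2,1,45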
def addheartbeatjobtoq(self):
    while True:
        tools = Tools.globalinfotool()
        for host in tools.getallhost():
            heartbeatjob = Job.Heartbeat(host)
            perfjob = Job.Perf(host)
            self.__globalq.put(heartbeatjob)
            self.__globalq.put(perfjob)
        time.sleep(self.pollinginterval)
def cancel_job():
    print("Received Cancel Request")
    if session.get("user_id") is None:
        return "You must be logged in to cancel this job!"
    json_data = request.get_json()
    jobId = json_data["jobId"]
    print("Canceling Job " + jobId)
    Job.cancelJob(jobId)
    return "Canceled Job " + jobId
def prepare_jobs(self):
    self.jobs = list()

    if self.single_file_list:
        if not os.path.isfile(self.single_file_list):
            raise FileNotFoundError("Couldn't find the bed file list ",
                                    self.single_file_list)
        command = '{executable} -i {bed_file_list} -o {merged_bed_file}'.\
            format(executable=self.executable,
                   bed_file_list=self.single_file_list,
                   merged_bed_file=self.bp_candidates_bed_file)
        job = Job.Job(command=command,
                      output_directory=self.cluster_out_directory,
                      job_name="final_bed_merge",
                      time_limit=120, cores=1, memory=4096)
        self.jobs.append(job)
        return 0

    for directory in self.input_files:
        output_directory = os.path.join(self.output_directory,
                                        os.path.basename(directory))
        os.makedirs(output_directory, exist_ok=True)
        lib_bp_bed_files_list = os.path.join(directory,
                                             "candidate_bp_files_list.txt")
        merged_bed_file = os.path.join(output_directory,
                                       'candidate_branchpoints.bed')
        self.list_of_merged_files.append(merged_bed_file)
        if not os.path.isfile(lib_bp_bed_files_list):
            raise FileNotFoundError("Could not find the bed file list",
                                    lib_bp_bed_files_list)
        command = '{executable} -i {bed_file_list} -o {merged_bed_file}'.\
            format(executable=self.executable,
                   bed_file_list=lib_bp_bed_files_list,
                   merged_bed_file=merged_bed_file)
        job = Job.Job(command=command,
                      output_directory=self.cluster_out_directory,
                      job_name=os.path.basename(directory),
                      time_limit=120, cores=1, memory=4096)
        self.jobs.append(job)

    with open(self.merged_files, 'w') as list_stream:
        for bed_file in self.list_of_merged_files:
            print(bed_file, file=list_stream)
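# For reference, the command string built above expands to something like the
# following (executable name and paths are hypothetical):
#
#   /path/to/merge_tool -i candidate_bp_files_list.txt -o candidate_branchpoints.bed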
def deleteUser(user_id):
    try:
        Job.deleteJobsForUser(user_id)
    except Exception:
        return "Couldn't delete user's job files"
    with Database.pool.get_connection() as connection:
        with connection.cursor() as cursor:
            # query parameters must be passed as a tuple, not a bare value
            cursor.execute(remove_user, (user_id,))
    return "User has been deleted"
def start(self, count):
    print '\nFetching jobs from dice.com'
    webURL = "https://www.dice.com/jobs?q=Computer+Science&l=San+Jose%2C+CA"
    for page in range(1, 2):
        page = (page - 1) * 10
        url = "%s%d" % (webURL, page)
        target = Soup(urllib.urlopen(url), "html.parser")
        targetElements = target.findAll('div', attrs={'class': 'complete-serp-result-div'})
        if targetElements == []:
            break
        for element in targetElements:
            # creating a job instance to store details like job title, company, address, JobLink
            job = Job()
            title = element.find('span', attrs={'itemprop': 'title'})
            if title != None:
                job.jobTitle = title.getText().strip()
            company = element.find('span', attrs={'class': 'compName'})
            if company != None:
                job.companyName = company.getText().strip()
            addr = element.find('span', attrs={'class': 'jobLoc'})
            if addr != None:
                job.address = addr.getText().strip()
            job.homeURL = "https://www.dice.com"
            sub = element.find('a', attrs={'itemprop': 'url'}).attrs['href']
            job.jobLink = "%s%s" % (job.homeURL, sub)
            if job.jobLink != "" and job.jobLink != None:
                # joburl = urllib.quote(job.jobLink.encode('utf8'), ':/')
                joblinkTarget = Soup(urllib.urlopen(job.jobLink), "html.parser")
                summaryElement = joblinkTarget.find('div', attrs={'itemprop': 'description'})
                job.summary = self.utils.clean_process_summary(summaryElement)
            if job.jobTitle != None and job.jobLink != None and job.summary != []:
                self.jobsFetched.append(job)
                job.id = count
                count += 1
                # job.printDetails()
    print "No. of jobs fetched: " + str(len(self.jobsFetched))
    print 'Fetching jobs from dice.com completed.'
    return self.jobsFetched
def __init__(self, kLookAhead, inputFile, numberOfMachines):
    self.numberOfMachines = numberOfMachines
    self.readFile = open(inputFile, 'r')
    self.jobs = []
    self.lookAhead = kLookAhead
    self.sumJobTime = 0.0
    line = self.readFile.readline()  # skip the first line of the file
    self.jobs = self.readFile.readlines()
    self.readFile.close()
    for i in range(len(self.jobs)):
        # parse the job length first, so the running sum uses the raw value
        # rather than the Job object that replaces it in the list
        job_time = int(float(self.jobs[i].replace('\n', '')))
        self.jobs[i] = Job.Job(job_time, self.numberOfMachines)
        self.sumJobTime += job_time
    self.MAXJOB = Job.Job(0, self.numberOfMachines)
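# Sketch of the input file this constructor assumes, inferred from the reads
# above: the first line is discarded, and every following line holds a single
# job length (values are hypothetical):
#
#   <header line, ignored>
#   12.0
#   7.0
#   3.0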
def queueJobInQueue(self, queue, quota_names, func, context, *args, **kwargs):
    """Queue a job in the specified queue."""
    portal = getUtility(ISiteRoot)
    portal_path = portal.getPhysicalPath()
    context_path = context.getPhysicalPath()
    uf_path, user_id = _getAuthenticatedUser()
    job = Job(_executeAsUser, context_path, portal_path, uf_path, user_id,
              func, *args, **kwargs)
    if quota_names:
        job.quota_names = quota_names
    job = queue.put(job)
    job.addCallbacks(success=job_success_callback,
                     failure=job_failure_callback)
    return job
def get_job_data(job_id):
    if session.get("user_id") is None:
        return redirect("/login")
    job_data = Job.getJobForUserId(job_id, session.get("user_id"))
    # check for a missing job before dereferencing it
    if job_data is not None:
        associated_jobs = Job.getAssociatedJobs(job_data["uuid"])
        return jsonify({
            "job_data": [job_data],
            "associated_jobs": associated_jobs
        })
    else:
        return "No job data."
def get_job_data(job_id):
    user = Job.getFirstNameForUuid(job_id)
    if session.get("user_id") is None and user != "Guest":
        return redirect("/login")
    job_data = Job.getJobFromUuid(job_id)
    # check for a missing job before dereferencing it
    if job_data is not None:
        associated_jobs = Job.getAssociatedJobs(job_data["uuid"])
        return jsonify({
            "job_data": [job_data],
            "associated_jobs": associated_jobs
        })
    else:
        return "No job data."
def view_job(job_id):
    user = Job.getFirstNameForUuid(job_id)
    if session.get("user_id") is None and user != "Guest":
        return redirect("/login")
    else:
        return render_template("job.html")
def queueJobInQueue(self, queue, quota_names, func, context, *args, **kwargs):
    portal = getUtility(ISiteRoot)
    portal_path = portal.getPhysicalPath()
    pm = getToolByName(portal, 'portal_membership')
    user = pm.getAuthenticatedMember()
    user_id = user.getId()
    uf_path = user.aq_parent.aq_parent.getPhysicalPath()
    context_path = context.getPhysicalPath()
    job = Job(_executeAsUser, portal_path, context_path, user_id, uf_path,
              func, *args, **kwargs)
    if quota_names:
        job.quota_names = quota_names
    job = queue.put(job)
    job.addCallbacks(success=job_success_callback,
                     failure=job_failure_callback)
    return job
def btnRunAll_Clicked():
    try:
        result = Job.main()
        tkMessageBox.showinfo("Finished", result)
    except Exception as e:
        tkMessageBox.showinfo("Error", str(e))
def handle_form():
    if session.get("user_id") is None:
        return "You must be logged in to submit a job!"
    user_id = session["user_id"]
    print("Now creating a job on behalf of:", user_id)
    json_data = request.get_json()
    parameters = {}
    # infer the topology and configuration files from the uploaded file names
    for (file_name, _) in json_data["files"].items():
        if ".top" in file_name:
            parameters.update({"topology": file_name})
        if ".dat" in file_name or ".conf" in file_name or ".oxdna" in file_name:
            parameters.update({"conf_file": file_name})
    parameters.update(json_data["parameters"])
    files = json_data["files"]
    addDefaultParameters(parameters)
    metadata = {}
    job_data = {"metadata": metadata, "parameters": parameters, "files": files}
    success, error_message = Job.createJobForUserIdWithData(user_id, job_data)
    if success:
        return "Success"
    else:
        return error_message
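# Sketch of the JSON body handle_form expects, inferred from the key accesses
# above; file names, contents, and parameter values are hypothetical:
#
#   {
#       "files": {"strand.top": "<contents>", "strand.conf": "<contents>"},
#       "parameters": {"steps": "1000000"}
#   }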
def create_analysis(jobId):
    print("QUERIED!")
    if session.get("user_id") is None:
        return "You must be logged in to submit a job!"
    userId = session["user_id"]
    print("Now creating an analysis on behalf of:", userId, " and for job id:", jobId)
    '''
    json_data = request.get_json()
    parameters = json_data["parameters"]
    files = json_data["files"]
    addDefaultParameters(parameters)
    metadata = {}
    job_data = {
        "metadata": metadata,
        "parameters": parameters,
        "files": files
    }
    Job.createJobForUserIdWithData(user_id, job_data)
    '''
    return Job.createAnalysisForUserIdWithJob(userId, jobId)
def btnGenerate_Clicked():
    try:
        queryResult = Job.executeQuery(str(eQuery.get(1.0, END)),
                                       str(eRecordType.get()))
        tkMessageBox.showinfo("Finished", queryResult)
    except Exception as e:
        tkMessageBox.showinfo("Error", str(e))
def __init__(self, qubeJobObject):
    logger.debug('Initialize Controller')
    logger.debug('Incoming Qube Job Object: ' + str(qubeJobObject))
    self.job = Job.Job()
    self.errors = []
    self.job.qubejob = qubeJobObject
    self.loadOptions()
def getAnalysisOutput(uuid, analysis_id, desired_output):
    if session.get("user_id") is None:
        return "You must be logged in to view the output of a job"
    user_directory = "/users/" + str(session["user_id"]) + "/"
    job_directory = user_directory + uuid + "/"
    # map each output type to the file suffix the analysis writes
    desired_output_map = {
        "distance_data": ".txt",
        "distance_hist": "_hist.png",
        "distance_traj": "_traj.png",
        "distance_log": ".log"
    }
    job_data = Job.getAssociatedJobs(uuid)
    if job_data:
        for job in job_data:
            if job["uuid"] == analysis_id:
                desired_file_path = (job_directory + job["name"]
                                     + desired_output_map[desired_output])
                try:
                    return send_file(desired_file_path, as_attachment=True)
                except Exception:
                    abort(404, description=(
                        "No {type} found for job {uuid}\n"
                        "Either the job hasn't produced that output yet "
                        "or something has gone horribly wrong"
                    ).format(type=desired_output, uuid=analysis_id))
def __init__(self):
    # counter
    self.st = 0
    # instantiating the status object
    self.statusObj = Status()
    # loading a dictionary with the available parameters list
    self.jobStatus = lbJob.JobStatus(self.statusObj)
    self.states = self.jobStatus.states_names
    self.attrNumber = self.jobStatus.ATTR_MAX
    # defining fields of interest
    self.status = self.states.index('Status')
    self.reason = self.states.index('Reason')
    self.networkServer = self.states.index('Network server')
    self.destination = self.states.index('Destination')
    self.stateEnterTimes = self.states.index('Stateentertimes')
    self.doneCode = self.states.index('Done code')
    self.jobId = self.states.index('Jobid')
    import re
    # raw strings avoid invalid-escape warnings for \d
    self.ft = re.compile(
        r"(\d+)Undefined=(\d+) Submitted=(\d+) Waiting=(\d+) Ready=(\d+) "
        r"Scheduled=(\d+) Running=(\d+) Done=(\d+) Cleared=(\d+) Aborted=(\d+) "
        r"Cancelled=(\d+) Unknown=(\d+) Purged=(\d+)"
    )
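# Example of a 'Stateentertimes' value that self.ft would match, inferred from
# the pattern above; all counts and timestamps are hypothetical (shown wrapped
# here, but it is a single line in practice):
#
#   "13Undefined=0 Submitted=1571220000 Waiting=1571220005 Ready=1571220010
#    Scheduled=1571220012 Running=1571220020 Done=1571220300 Cleared=0
#    Aborted=0 Cancelled=0 Unknown=0 Purged=0"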
def addJob(self, fileName, method, isObject, QID, args):
    # print 'adding a job: ', job, '...'
    newJob = Job.ClientJob(self.processNumber, 'none', fileName, method,
                           isObject, QID, args)
    p = mp.Process(target=self.importLibrary, args=(newJob,))
    newJob.process = p
    self.jobsToRun.append(newJob)
    self.processNumber += 1
def initJob():
    # Get the job object
    jobObject = qb.jobobj()
    job = Job.Job(logger)       # Create our own Job object
    job.loadOptions(jobObject)  # Load the Qube job into our job template
    return job
def addJobWithExtra(self, processNumber, QJobName, fileName, method, isObject, QID, args):
    newJob = Job.ClientJob(processNumber, 'none', fileName, method,
                           isObject, QID, args)
    newJob.QJobName = QJobName
    p = mp.Process(target=self.importLibrary, args=(newJob,))
    newJob.process = p
    self.jobsToRun.append(newJob)
def worker(pool, name, start, end):
    # the name argument is immediately replaced by the worker thread's own name
    name = threading.current_thread().name
    pool.makeActive(name)
    print '\nNow running: %s' % str(pool)
    j = Job.Job(name=name)
    cmd = j.renderJob(start=start, end=end)
    j.run(cmd)
    pool.makeInactive(name)
def btnRun_Clicked():
    try:
        if str(eRecordType.get()) == "":
            tkMessageBox.showinfo("Error", "Enter an object to update")
            return
        Job.executeQuery(str(eQuery.get(1.0, END)), str(eRecordType.get()))
        rows = Job.readCSV(str(eRecordType.get()))
        result = Job.uploadResultsToSalesforce(str(eRecordType.get()),
                                               str(strVarAction.get()),
                                               rows,
                                               str(eSfUsername.get()),
                                               str(eSfPassword.get()),
                                               str(eSfSecurityToken.get()),
                                               str(vUseSfSandbox.get()))
        tkMessageBox.showinfo("Finished", result)
    except Exception as e:
        tkMessageBox.showinfo("Error", str(e))
def timerjobdeal(self):
    jober = Job.jober()
    while True:
        self.__jobwaitq.put(jober.getjobq())
        job = self.__globalq.get()
        jober.addjob(job)
        jober.executejob()
        self.__globalq.task_done()
        time.sleep(int(self.__config.getbykey('jobsleep', 'jober')))
def newJob(self, from_reference, to_do):
    """
    Starts a new Job in its own thread. The object calling this method
    must be the endpoint of the Job.

    @param from_reference: the reference of the object from which the job is
    @param to_do: what has to be done
    """
    self._updateUnitList()
    self.jobs.append(Job(from_reference, to_do, self))
    # start the job that was just appended
    self.jobs[-1].start()
def getAnalysisOutput(uuid, analysis_id, desired_output):
    user = Job.getFirstNameForUuid(analysis_id)
    if session.get("user_id") is None and user != "Guest":
        return "You must be logged in to view the output of a job"
    userid = str(Job.getUserIdForJob(uuid))
    user_directory = "/users/" + userid + "/"
    job_directory = user_directory + uuid + "/"
    # map each output type to the file suffix the analysis writes
    desired_output_map = {
        "distance_data": ".txt",
        "distance_hist": "_hist.png",
        "distance_traj": "_traj.png",
        "distance_log": ".log",
        "angle_plot_data": ".txt",
        "angle_plot_hist": "_hist.png",
        "angle_plot_traj": "_traj.png",
        "angle_plot_log": ".log",
        "energy_log": ".log",
        "energy_hist": "_hist.png",
        "energy_traj": "_traj.png"
    }
    desired_file_path = ""
    job_data = Job.getAssociatedJobs(uuid)
    if job_data:
        for job in job_data:
            if job["uuid"] == analysis_id:
                desired_file_path = (job_directory + job["name"]
                                     + desired_output_map[desired_output])
    if not desired_file_path:
        print("No output found for query {}".format(desired_output))
    # log outputs are returned inline as plain text, everything else as a download
    if "log" in desired_output:
        try:
            desired_file = open(desired_file_path, "r")
            desired_file_contents = desired_file.read()
            return Response(desired_file_contents, mimetype='text/plain')
        except Exception:
            abort(404, description="{type} for job {uuid} is currently unfinished"
                  .format(type=desired_output, uuid=analysis_id))
    else:
        try:
            return send_file(desired_file_path, as_attachment=True)
        except Exception:
            abort(404, description="{type} for job {uuid} is currently unfinished"
                  .format(type=desired_output, uuid=analysis_id))
def getJobs():
    if session.get("user_id") is None:
        return "You must be logged in to view your jobs"
    user_id = int(session["user_id"])
    jobs = Job.getJobsForUserId(user_id)
    return jsonify(jobs)
def add_new_process(self, now):
    self.server_chooser.set_decision_pars(self.servers)
    server_number = self.server_chooser.choose_server(None)
    temp_job = Job.Job(now,
                       self.limit_time_generator.generate(),
                       self.process_time_generator.generate(),
                       0)
    added = self.servers[server_number].add_new_job(temp_job)
    if not added:
        self.logger.add_log(temp_job.forward_cost, temp_job.enter_time,
                            temp_job.enter_time, 'blocked')
    self.logger.add_sample(self.servers)
    return
def _queueJobsInQueue(self, queue, quota_names, job_infos, serialize=True):
    """Queue multiple jobs in the specified queue."""
    portal = getUtility(ISiteRoot)
    portal_path = portal.getPhysicalPath()
    uf_path, user_id = _getAuthenticatedUser()
    scheduled = []
    for (func, context, args, kwargs) in job_infos:
        context_path = context.getPhysicalPath()
        job = Job(_executeAsUser, context_path, portal_path, uf_path,
                  user_id, func, *args, **kwargs)
        scheduled.append(job)
    if serialize:
        job = serial(*scheduled)
    else:
        job = parallel(*scheduled)
    if quota_names:
        job.quota_names = quota_names
    job = queue.put(job)
    job.addCallbacks(success=job_success_callback,
                     failure=job_failure_callback)
    return job
def _queueJobsInQueue(self, queue, quota_names, job_infos, serialize=True):
    portal = getUtility(ISiteRoot)
    portal_path = portal.getPhysicalPath()
    pm = getToolByName(portal, 'portal_membership')
    user = pm.getAuthenticatedMember()
    user_id = user.getId()
    uf_path = user.aq_parent.aq_parent.getPhysicalPath()
    scheduled = []
    for (func, context, args, kwargs) in job_infos:
        context_path = context.getPhysicalPath()
        job = Job(_executeAsUser, portal_path, context_path, user_id,
                  uf_path, func, *args, **kwargs)
        scheduled.append(job)
    if serialize:
        job = serial(*scheduled)
    else:
        job = parallel(*scheduled)
    if quota_names:
        job.quota_names = quota_names
    job = queue.put(job)
    job.addCallbacks(success=job_success_callback,
                     failure=job_failure_callback)
    return job
def run(self):
    while True:
        patientAllTasksDict = self.queue.get()
        patientError = False
        # process subtask groups in numeric order
        subtaskList = [str(x) for x in
                       sorted(int(x) for x in patientAllTasksDict.keys())]
        for subTaskGroup in subtaskList:
            if patientError:
                break
            subTaskGroupDict = patientAllTasksDict[subTaskGroup]
            for JobDict in subTaskGroupDict:
                self.parallelDicts.append(JobDict)
            jobEvent = threading.Event()
            jobThread = Job.pipelineJob(jobEvent, self.drmaaSession,
                                        self.logFileWriter, self.parallelDicts)
            if not self.runThreadAndWait(jobThread, jobEvent):
                patientError = True
                # skip the rest of this iteration because an error occurred in the job
                continue
            self.parallelDicts = []
        self.queue.task_done()
    # my_jobs = [j for j in queue if len(j.args) >= 6
    #            and j.args[4] == fetchPreviews
    #            and j.args[0] == path]
    # if my_jobs:
    #     for j in my_jobs:
    #         queue.remove(j)
    virtual_url_parts = context.REQUEST.get('VIRTUAL_URL_PARTS')
    vr_path = list(request.get('VirtualRootPhysicalPath', ()))
    portal = getUtility(ISiteRoot)
    portal_path = portal.getPhysicalPath()
    context_path = context.getPhysicalPath()
    uf_path, user_id = _getAuthenticatedUser()
    job = Job(_executeAsUser, context_path, portal_path, uf_path, user_id,
              fetchPreviews, virtual_url_parts, vr_path)
    # job = async.queueJob(fetchPreviews, self.context,
    #                      virtual_url_parts, vr_path)
    job = queue.put(job, begin_after=datetime.now(pytz.UTC)
                    + timedelta(0, ASYNC_CONVERSION_DELAY))
    job.addCallbacks(success=job_success_callback,
                     failure=job_failure_callback)
    return True


class RecursiveQueueJob(BrowserView):
    """
    Queues docconv jobs for the context and everything inside.

    Default behaviour:
    * skip if type is in EXCLUDE_TYPES
    * skip if object already has preview/thumb images
#!/usr/local/bin/python

import datetime
import sys

import Job
import Listeners
import Status

print 'Starting IceFollow at', datetime.datetime.now()
sys.stdout.flush()

Job.runJobs(Status.StatusJob(), Listeners.ListenerJob())