Example No. 1
def search_job_by_deadline(jobs, start, end, cursor1, cursor2):

    # start and end are bound as query parameters ('?' placeholders, pyodbc
    # style) instead of the hard-coded date literals in the original.
    cursor1.execute("""
     select a.Workno, g.GoodNo, g.GoodCd, i.GoodCd as 'RawMaterialCd', m4.Minorcd,
            a.OrderQty, a.DeliveryDate,
            --case when i.Class3 = '061038' then '단조' else 'HEX' end as Gubun,
            case when m3.MinorNm = 'Forging' then 0
                 when m3.MinorNm = 'Hex Bar' then 1
                 when m3.MinorNm = 'Round Bar' then 2
                 when m3.MinorNm = 'Square Bar' then 3
                 when m3.MinorNm = 'VALVE 선작업' then 4 end as RawMaterialGubun,
            --    raw material size
            ISNULL(i.Size, 0) as RawMaterialSize,
            --    whether the part is a LOK FITTING
            case when g.Class3 = '061001' then 'Y' else 'N' end as LOKFITTINGYN, --LOK FITTING
            --    for LOK FITTING parts, machine assignment follows the size code in the semi-finished part number
            case when g.Class3 = '061001' and ((LEFT(REPLACE(g.GoodNo, RTRIM(m4.MinorNm),''),3) = '-1-') or
                                               (LEFT(REPLACE(g.GoodNo, RTRIM(m4.MinorNm),''),3) = '-2-') or
                                               (LEFT(REPLACE(g.GoodNo, RTRIM(m4.MinorNm),''),3) = '-3-') or
                                               (LEFT(REPLACE(g.GoodNo, RTRIM(m4.MinorNm),''),4) = '-2M-') or
                                               (LEFT(REPLACE(g.GoodNo, RTRIM(m4.MinorNm),''),4) = '-3M-') or
                                               (LEFT(REPLACE(g.GoodNo, RTRIM(m4.MinorNm),''),4) = '-4M-')) then 'Y' else 'N' end as LOKFITTINGSIZEYN, a.CloseDate, a.Credate, a.Moddate
     from TWorkreport_Han_Eng a
     inner join TGood g on a.Goodcd = g.GoodCd
     inner join TGood i on a.Raw_Materialcd = i.GoodCd
     left outer join TMinor m3 on i.Class3 = m3.MinorCd
     left outer join TMinor m4 on g.Class4 = m4.MinorCd
     where a.DeliveryDate between ? and ?
        and PmsYn = 'N'
        and ContractYn = '1'
        --    Forging / Hex Bar / Round Bar / Square Bar / VALVE pre-work
        and i.Class3 in ('061038', '061039', '061040', '061048', '061126')
     order by a.DeliveryDate
    """, (start, end))
    row = cursor1.fetchone()

    while row:
        temp = Job.Job()
        temp.setWorkNo(row[0])
        temp.setGoodNo(row[1])
        temp.setGoodCd(row[2])
        goodcd = row[2]
        temp.setRawmaterialCd(row[3])
        temp.setOrderQty(row[5])
        temp.setDeliveryDate(row[6])
        temp.setRawmaterialGubun(row[7])
        temp.setSpec(row[8])
        temp.setLOKFitting(row[9])
        temp.setLOKFittingSize(row[10])
        temp.setCycletime(search_cycle_time(cursor2, goodcd))
        # keep the job only if a cycle time was found
        if len(temp.getCycletime()) != 0:
            jobs.append(temp)
        row = cursor1.fetchone()

    total_number = len(jobs)
    print("total number of jobs: %d" % total_number)
Example No. 2
    def prepare_jobs(self):

        self.jobs = list()

        if self.single_file_list:
            if not os.path.isfile(self.single_file_list):
                raise FileNotFoundError("Couldn't find the bed file list ",
                                        self.single_file_list)

            command = '{executable} -i {bed_file_list} -o {merged_bed_file}'.\
                      format(executable = self.executable ,
                             bed_file_list = self.single_file_list,
                             merged_bed_file = self.bp_candidates_bed_file
                             )
            job = Job.Job(command=command,
                          output_directory=self.cluster_out_directory,
                          job_name="final_bed_merge",
                          time_limit=120,
                          cores=1,
                          memory=4096)
            self.jobs.append(job)
            return 0

        for directory in self.input_files:
            output_directory = os.path.join(self.output_directory,
                                            os.path.basename(directory))
            os.makedirs(output_directory, exist_ok=True)
            lib_bp_bed_files_list = os.path.join(
                directory, "candidate_bp_files_list.txt")
            merged_bed_file = os.path.join(output_directory,
                                           'candidate_branchpoints.bed')
            self.list_of_merged_files.append(merged_bed_file)

            if not os.path.isfile(lib_bp_bed_files_list):
                raise FileNotFoundError("Could not find the bed file list",
                                        lib_bp_bed_files_list)

            command = '{executable} -i {bed_file_list} -o {merged_bed_file}'.\
                       format(executable = self.executable ,
                              bed_file_list = lib_bp_bed_files_list,
                              merged_bed_file = merged_bed_file
                              )

            job = Job.Job(command=command,
                          output_directory=self.cluster_out_directory,
                          job_name=os.path.basename(directory),
                          time_limit=120,
                          cores=1,
                          memory=4096)
            self.jobs.append(job)

        with open(self.merged_files, 'w') as list_stream:
            for bed_file in self.list_of_merged_files:
                print(bed_file, file=list_stream)
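Several examples in this listing construct Job.Job(command=..., output_directory=..., job_name=..., time_limit=..., cores=..., memory=...). A minimal sketch of a class with that interface, assuming SLURM-style submission; only the constructor keywords come from the examples, while the submit method, sbatch flags, and units are assumptions:

import os
import subprocess

# Hedged sketch of the cluster Job interface used in these examples.
class Job:
    def __init__(self, command, output_directory, job_name,
                 time_limit, cores, memory):
        self.command = command
        self.output_directory = output_directory
        self.job_name = job_name
        self.time_limit = time_limit   # assumed to be minutes
        self.cores = cores
        self.memory = memory           # assumed to be MB

    def submit(self):
        # Hypothetical submission via sbatch --wrap; not the source's implementation.
        subprocess.run(["sbatch",
                        "--job-name", self.job_name,
                        "--time", str(self.time_limit),
                        "--cpus-per-task", str(self.cores),
                        "--mem", str(self.memory),
                        "--output", os.path.join(self.output_directory, "%x_%j.out"),
                        "--wrap", self.command],
                       check=True)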
Example No. 3
 def __init__(self, kLookAhead, inputFile, numberOfMachines):
     self.numberOfMachines = numberOfMachines
     self.lookAhead = kLookAhead
     self.sumJobTime = 0.0
     self.jobs = []
     with open(inputFile, 'r') as readFile:
         readFile.readline()  # the first line is skipped, as in the original
         lines = readFile.readlines()
     for line in lines:
         # parse the job time once; the original summed float(Job) after
         # wrapping, which fails unless Job defines __float__
         jobTime = int(float(line.strip()))
         self.jobs.append(Job.Job(jobTime, self.numberOfMachines))
         self.sumJobTime += jobTime
     self.MAXJOB = Job.Job(0, self.numberOfMachines)
Example No. 4
 def __init__(self, qubeJobObject):
     logger.debug('Initialize Controller')
     logger.debug('Incoming Qube Job Object: ' + str(qubeJobObject))
     self.job = Job.Job()
     self.errors = []
     self.job.qubejob = qubeJobObject
     self.loadOptions()
Example No. 5
    def start(self, count):
        print '\nFetching jobs from indeed.com'

        urlKeyword = "Computer Science"
        webURL = "http://www.indeed.com/jobs?q=" + urlKeyword + "&start="

        for page in range(1, 2):
            page = (page - 1) * 10
            url = "%s%d" % (webURL, page)
            target = Soup(urllib.urlopen(url), "html.parser")

            targetElements = target.findAll('div', attrs={'class': ' row result'})
            if targetElements == []:
                break
            for element in targetElements:
                try:
                    # creating a job instance to store details like job title, company, address, JobLink
                    job = Job()

                    company = element.find('span', attrs={'class': 'company'})
                    if company != None:
                        job.companyName = company.getText().strip()
                    title = element.find('a', attrs={'class': 'turnstileLink'}).attrs['title']

                    if title != None:
                        job.jobTitle = title.strip()

                    addr = element.find('span', attrs={'class': 'location'})
                    if addr != None:
                        job.address = addr.getText().strip()

                    job.homeURL = "http://www.indeed.com"
                    job.jobLink = "%s%s" % (job.homeURL, element.find('a').get('href'))

                    skillsElement = element.find('span', attrs={'class': 'experienceList'})
                    job.skills = self.utils.clean_process_summary(skillsElement)

                    summaryElement = element.find('span', attrs={'class': 'summary'})
                    job.summary = self.utils.clean_process_summary(summaryElement)

                    if ((job.jobLink != "") and (job.jobLink != None)):
                        joburl = urllib.quote(job.jobLink.encode('utf8'), ':/')
                        joblinkTarget = Soup(urllib.urlopen(joburl), "html.parser")
                        summaryElement = joblinkTarget.find('span', attrs={'class': 'summary'})
                        job.summary.extend(self.utils.clean_process_summary(summaryElement))

                    if (job.jobTitle != None and job.jobLink != None):
                        self.jobsFetched.append(job)
                        job.id = count
                        count += 1
                        # job.printDetails()

                except Exception as e:
                    print str(e)
                    continue

        print "No. of jobs fetched: " + str(len(self.jobsFetched))
        print 'Fetching jobs from indeed.com completed.'
        return self.jobsFetched
Example No. 6
def worker(pool, name, start, end):
    # the passed-in name is replaced by the actual thread name
    name = threading.current_thread().name
    pool.makeActive(name)
    print '\nNow running: %s' % str(pool)
    j = Job.Job(name=name)
    cmd = j.renderJob(start=start, end=end)
    j.run(cmd)
    pool.makeInactive(name)
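worker() matches the standard threading "active pool" pattern; a minimal sketch of the pool object it assumes, inferred from the makeActive/makeInactive/str(pool) calls (the class body is an assumption, not the source's):

import threading

# Hedged ActivePool sketch inferred from the calls above.
class ActivePool(object):
    def __init__(self):
        self.active = []
        self.lock = threading.Lock()

    def makeActive(self, name):
        with self.lock:
            self.active.append(name)

    def makeInactive(self, name):
        with self.lock:
            self.active.remove(name)

    def __str__(self):
        with self.lock:
            return str(self.active)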
Example No. 7
def initJob():
    # Get the job object
    jobObject = qb.jobobj()

    job = Job.Job(logger) # Create our own Job Object
    job.loadOptions(jobObject) # Load the Qube Job into our job template

    return job
Example No. 8
    def start(self, count):

        print '\nFetching jobs from dice.com'

        webURL = "https://www.dice.com/jobs?q=Computer+Science&l=San+Jose%2C+CA"

        for page in range(1, 2):
            page = (page - 1) * 10
            # NOTE: the page offset is concatenated directly onto the URL with
            # no query parameter; a paging parameter is almost certainly intended
            url = "%s%d" % (webURL, page)
            target = Soup(urllib.urlopen(url), "html.parser")

            targetElements = target.findAll(
                'div', attrs={'class': 'complete-serp-result-div'})
            if targetElements == []:
                break
            for element in targetElements:
                # creating a job instance to store details like job title, company, address, JobLink
                job = Job()

                title = element.find('span', attrs={'itemprop': 'title'})
                if title != None:
                    job.jobTitle = title.getText().strip()

                company = element.find('span', attrs={'class': 'compName'})
                if company != None:
                    job.companyName = company.getText().strip()

                addr = element.find('span', attrs={'class': 'jobLoc'})
                if addr != None:
                    job.address = addr.getText().strip()

                job.homeURL = "https://www.dice.com"
                sub = element.find('a', attrs={
                    'itemprop': 'url'
                }).attrs['href']
                job.jobLink = "%s%s" % (job.homeURL, sub)

                if ((job.jobLink != "") and (job.jobLink != None)):
                    # joburl = urllib.quote(job.jobLink.encode('utf8'), ':/')
                    joblinkTarget = Soup(urllib.urlopen(job.jobLink),
                                         "html.parser")

                    summaryElement = joblinkTarget.find(
                        'div', attrs={'itemprop': 'description'})
                    job.summary = self.utils.clean_process_summary(
                        summaryElement)

                if (job.jobTitle != None and job.jobLink != None
                        and job.summary != []):
                    self.jobsFetched.append(job)
                    job.id = count
                    count += 1
                    # job.printDetails()

        print "No. of jobs fetched: " + str(len(self.jobsFetched))
        print 'Fetching jobs from dice.com completed.'
        return self.jobsFetched
Example No. 9
 def load_jobs(self, filename):
     '''Loads jobs from input file.'''
     with open(filename, 'r') as file:
         for line in file:
             if not line.startswith('JOB,PRIORITY,LENGTH'):
                 data = line.rstrip().split(',')
                 job = Job(data[0], int(data[1]), int(data[2]))
                 self.__jobHeap.add((job.get_priority(), job.get_length()), job)
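A minimal sketch of the Job class this scheduler relies on, inferred from the accessor calls here and in Example No. 26 (get_name, get_priority, set_priority, get_length); the field names are assumptions:

# Hedged sketch; only the method names come from the call sites.
class Job:
    def __init__(self, name, priority, length):
        self._name = name
        self._priority = priority
        self._length = length

    def get_name(self):
        return self._name

    def get_priority(self):
        return self._priority

    def set_priority(self, priority):
        self._priority = priority

    def get_length(self):
        return self._length

    def __str__(self):
        return '{0:<20} {1:>10} {2:>10}'.format(self._name, self._priority, self._length)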
Example No. 10
    def newJob(self, from_reference, to_do):
        """
		Starts a new Job in its own thread. The Object calling this method must be the endpoint of the Job.
		@param from_reference: the reference of the object from which the job is
		@param to_do: what has to be done
		"""
        self._updateUnitList()
        self.jobs.append(Job(from_reference, to_do, self))
        self.jobs[len(jobs) - 1].start()
Example No. 11
 def add_new_process(self, now):
     self.server_chooser.set_decision_pars(self.servers)
     server_number = self.server_chooser.choose_server(None)
     temp_job = Job.Job(now, self.limit_time_generator.generate(),
                        self.process_time_generator.generate(), 0)
     added = self.servers[server_number].add_new_job(temp_job)
     if not added:
         self.logger.add_log(temp_job.forward_cost, temp_job.enter_time,
                             temp_job.enter_time, 'blocked')
     self.logger.add_sample(self.servers)
     return
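The limit_time_generator and process_time_generator objects are not shown; a minimal compatible sketch, assuming exponentially distributed times (only the generate() signature comes from the call sites):

import random

# Hedged generator sketch; the distribution and rate are assumptions.
class ExponentialGenerator:
    def __init__(self, rate):
        self.rate = rate

    def generate(self):
        return random.expovariate(self.rate)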
Example No. 12
    def generate_tasks(self):

        for each in self.task_parameters:

            (mode, startingTaskId, numJobs, rateOfJobGeneration,
             startingJobId, lowInstrBound, highInstrBound,
             lowMemBound, highMemBound) = each

            if (mode == 'W'):
                # Note that job-task correspondence is one-to-one
                webTaskBucket = []

                for taskId in xrange (startingTaskId, startingTaskId + numJobs):
                    name = "Job%s-%s" % (0, taskId)
                    reqInstr = int (random.uniform (lowInstrBound, highInstrBound))
                    reqMem = int (random.uniform (lowMemBound, highMemBound))
                    # JobId = 0, TaskId = taskId
                    jobInTask = Job (name, reqInstr, reqMem, taskId, numJobs, 0)
                    task = Task ("Task" + str(taskId), taskId, [jobInTask])
                    webTaskBucket.append(task)
                
                self.tasklist.append (webTaskBucket)

            elif (mode == 'S'):
                # Create a set of jobs for the task
                joblist = []

                for jobId in xrange (startingJobId, startingJobId + numJobs):
                    name = "Job%s-%s" % (jobId, startingTaskId)
                    reqInstr = int (random.uniform (lowInstrBound, highInstrBound))
                    reqMem = int (random.uniform (lowMemBound, highMemBound))
                    joblist.append (Job (name, reqInstr, reqMem, startingTaskId, numJobs, jobId))
             
                task = Task ("Task" + str(startingTaskId), startingTaskId, joblist)
                self.tasklist.append ([task])
Example No. 13
 def fixed_jobs(self, J, T, time, cpun, gpun, memb, nicb):
     jid = 1
     jobs = []
     for _ in range(J):
         tasks = []
         tid = 1
         for _ in range(T):
             tasks.append(Task(jid, tid, time, cpun, gpun, memb, nicb))
             tid += 1
         jobs.append(Job(jid, tasks))
         jid += 1
     return jobs
Example No. 14
    def dig(self, _course):
        jobs = []
        url_r = requests.get(self.url_base + str(_course))

        encoding = url_r.encoding if 'charset' in url_r.headers.get('content-type', '').lower() else None

        if url_r.status_code == 200:
            url_soup = BeautifulSoup(url_r.content, 'html.parser', from_encoding=encoding)
            tbody = url_soup.find('tbody')
            jobOffers = tbody.findChildren('td')
            for job in jobOffers:
                link = job.find('a', href=True)['href']
                url_offer_r = requests.get(link)

                if url_offer_r.status_code == 200:
                    job_s = BeautifulSoup(url_offer_r.content, 'html.parser')

                    divHead = job_s.find(id='item_head')
                    if divHead:
                        jobTitle = divHead.findChild('strong').text
                    else:
                        jobTitle = "Not defined"

                    divDates = job_s.find(id='type_dates')
                    if divDates:
                        jobType = divDates.findChild('strong').text
                        dates = divDates.findAll('em')
                        jobPublishment = dates[0].text
                        jobExpire = dates[1].text
                    else:
                        jobType = "Not defined"
                        jobPublishment = "Not defined"
                        jobExpire = "Not defined"

                    #---IMPLEMENT MAPS
                    divLocation = job_s.find(id='item_location').find('strong')
                    if divLocation:
                        jobAddress = divLocation.text
                    else:
                        jobAddress = "Not defined"

                    description = job_s.find(id='description').find('p').text

                    divImgs = job_s.find(id='description').findChildren('img')

                    infos = []
                    # guard against offers with fewer than three info icons
                    for img in divImgs[:3]:
                        infos.append(img.get('alt', ''))


                    jobs.append(Job.Job(link, jobTitle, jobType, jobPublishment, jobExpire, jobAddress, description, infos))

        return jobs
Example No. 15
def main():
    job_queue = []

    # Read the threads and jobs information from file
    num_threads, num_jobs, process_times = read_input('testcases.txt')

    # Create jobs from process times
    for i in range(num_jobs):
        job = Job.Job(process_times[i])
        job_queue.append(job)

    # Process the jobs in parallel
    process_jobs(num_threads, job_queue)
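read_input and process_jobs are not shown; a minimal process_jobs sketch using a thread pool over a shared queue, assuming each Job exposes a process() method (that method name is an assumption):

import queue
import threading

# Hedged sketch of process_jobs; Job.process() is an assumed API.
def process_jobs(num_threads, job_queue):
    q = queue.Queue()
    for job in job_queue:
        q.put(job)

    def run():
        while True:
            try:
                job = q.get_nowait()
            except queue.Empty:
                return
            job.process()
            q.task_done()

    threads = [threading.Thread(target=run) for _ in range(num_threads)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()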
Example No. 16
 def prepare_jobs(self):
     command = "{executable} -f {fasta_file} -b {bed_file} " \
               "-o {output_directory} -n {nucs}".format(
                   executable = self.executable,
                   fasta_file = self.fasta_file,
                   bed_file   = self.bed_file,
                   output_directory = self.output_directory,
                   nucs = self.number_of_nucleotides
                )
     this_job = Job.Job(command=command,
                        output_directory=self.cluster_out_directory,
                        job_name=self.name,
                        time_limit=self.run_time,
                        cores=1,
                        memory=self.memory)
     self.jobs.append(this_job)
Example No. 17
    def __define_pipeline(self, config):
        '''
        Build the job nodes.
        __JOB['Node_Name'] stores all job objects.
        __Node stores the head and tail nodes.
        '''

        for job_name, job_define in config["JOB"].items():
            self.__JOB[job_name] = Job(job_define)
            self.__Node['Node_Name'].append(job_name)
            self.__Node['START'].append(job_name)
        self.__INPUT = {}
        self.__INPUT["prefix"] = isexists(isexists(config, "INPUT"), "prefix")
        self.__INPUT["suffix"] = isexists(isexists(config, "INPUT"), "suffix")
        self.__pipeline = {"INPUT": self.__INPUT, "JOB": self.__JOB}
Example No. 18
def callback(ch, method, properties, body):
    
    print(' Delivery task received')
    # reconstruct the Job instance from the pickled message body
    job = pickle.loads(body)

    d.arm_and_ready()

    d.deliver_package_to_address(job)

    d.reached_delivery_address()

    d.delivered_package()

    d.back_to_base(job)

    d.back_to_idle_state()
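The callback above is the consumer half of a RabbitMQ round trip; a minimal publisher sketch using pika, assuming a queue named 'delivery' and the same picklable Job class (queue name and Job fields are assumptions):

import pickle
import pika

# Hedged publisher sketch; queue name and Job construction are assumptions.
connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
channel = connection.channel()
channel.queue_declare(queue='delivery')

job = Job()  # populate with delivery details before publishing
channel.basic_publish(exchange='', routing_key='delivery', body=pickle.dumps(job))
connection.close()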
Example No. 19
 def getHPCEventJobFromEnv(self):
     tolog("getHPCEventJobFromEnv")
     try:
         # always use this filename as the new jobDef module name
         import newJobDef
         job = Job.Job()
         job.setJobDef(newJobDef.job)
         job.coreCount = 0
         job.workdir = self.__jobSite.workdir
         job.experiment = self.getExperiment()
         # figure out and set payload file names
         job.setPayloadName(self.__thisExperiment.getPayloadName(job))
         # reset the default job output file list which is anyway not correct
         job.outFiles = []
     except Exception, e:
         pilotErrorDiag = "Failed to process job info: %s" % str(e)
         tolog("!!WARNING!!3000!! %s" % (pilotErrorDiag))
         self.failJob(0,
                      PilotErrors.ERR_UNKNOWN,
                      job,
                      pilotErrorDiag=pilotErrorDiag)
Example No. 20
def ReadInput(filename):
    # read in the input file (the original ignored `filename` and opened
    # "input.txt" unconditionally)
    fp = open(filename, 'r')
    line = fp.readline().split()

    job_number = int(line[0])
    machine_number = int(line[1])
    jid = 0
    jobList = []
    # read all jobs and add to jobList
    while True:
        line = fp.readline()
        if not line:
            break
        jid = jid + 1
        time = line.split()
        start = time[0]
        end = time[1]
        job = Job.Job(jid, start, end)
        # add job
        jobList.append(job)
    fp.close()
    return job_number, machine_number, jobList
Example No. 21
# Selection setting (roulette_wheel)
population_size = 10  #66
num_iteration = 70
crossover_rate = 1  #66
mutation_rate = 1  #66

elite_selection_size = int(population_size * 0.2)
rank_selection_size = population_size * 2 - elite_selection_size
rank_selection_num = population_size - elite_selection_size
proba_list = rank_selection_get_proba_list(rank_selection_size)

# --------------------Frame-------------------------
# create jobs
jobs = []
for i in range(len(wip.values)):  #job len (100)
    jobs.append(Job(wip.iloc[i], eqp))

# create chromosomes
chromosomes = []
for i in range(population_size):
    chromosomes.append(Chromosome(len(jobs)))

# create machines
machines = []
for i in range(len(tool.values)):
    machines.append(Machine(tool.iloc[i]))

#--------------------------------------------------
# iterate
MakespanRecord = []
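rank_selection_get_proba_list is not shown; a minimal linear rank-selection sketch that returns one probability per rank, best rank first, summing to 1 (the linear weighting is an assumption):

# Hedged sketch; only the name and argument come from the call site.
def rank_selection_get_proba_list(size):
    total = size * (size + 1) / 2.0
    # rank 0 (best) gets weight `size`, the worst rank gets weight 1
    return [(size - i) / total for i in range(size)]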
Example No. 22
        tolog("Current job workdir is: %s" % os.getcwd())
        tolog("Site workdir is: %s" % jobSite.workdir)
        # get the experiment object
        thisExperiment = getExperiment(runJob.getExperiment())

        tolog("RunJob will serve experiment: %s" %
              (thisExperiment.getExperiment()))

        # set the cache (used e.g. by LSST)
        #if runJob.getCache():
        #    thisExperiment.setCache(runJob.getCache())

        #JR = JobRecovery()
        try:
            job = Job.Job()
            job.setJobDef(newJobDef.job)
            job.workdir = jobSite.workdir
            job.experiment = runJob.getExperiment()
            # figure out and set payload file names
            job.setPayloadName(thisExperiment.getPayloadName(job))
        except Exception, e:
            pilotErrorDiag = "Failed to process job info: %s" % str(e)
            tolog("!!WARNING!!3000!! %s" % (pilotErrorDiag))
            runJob.failJob(0,
                           error.ERR_UNKNOWN,
                           job,
                           pilotErrorDiag=pilotErrorDiag)

        # prepare the output file data directory
        # (will only be created for jobs that end up in a 'holding' state)
Example No. 23
                            pilotErrorDiag = "Cannot switch to FAX site mover for transferType=%s since faxredirector is not set" % (job['transferType'])
                            pUtil.tolog("!!WARNING!!1234!! %s" % (pilotErrorDiag))
                            return None, None, pilotErrorDiag

                # convert the data into a file for child process to pick for running real job later
                try:
                    f = open("Job_%s.py" % job['PandaID'], "w")
                    print >>f, "job=", job
                    f.close()
                except Exception,e:
                    pilotErrorDiag = "[pilot] Exception caught: %s" % str(e)
                    pUtil.tolog("!!WARNING!!1200!! %s" % (pilotErrorDiag), tofile=tofile)
                    return None, None, pilotErrorDiag

                # create the new job
                newJob = Job.Job()
                newJob.setJobDef(job)  # fill up the fields with correct values now
                newJob.mkJobWorkdir(self.__pilotWorkingDir)
                self.backupJobData(newJob, job)
                newJob.datadir = self.__jobSite.workdir + "/PandaJob_%s_data" % (newJob.jobId)
                newJob.experiment = self.__thisExperiment

                if job.has_key('logGUID'):
                    logGUID = job['logGUID']
                    if logGUID != "NULL" and logGUID != "":
                        newJob.tarFileGuid = logGUID
                        pUtil.tolog("Got logGUID from server: %s" % (logGUID), tofile=tofile)
                    else:
                        pUtil.tolog("!!WARNING!!1200!! Server returned NULL logGUID", tofile=tofile)
                        pUtil.tolog("Using generated logGUID: %s" % (newJob.tarFileGuid), tofile=tofile)
                else:
Example No. 24
 def set_job(self, name='A Job', rate=MIN_WAGE):
     """
     Give a Job to Worker.
     """
     self.job = Job.Job(name, rate)
Example No. 25
    def initial(self):

        # jobs 0-9 take 120, jobs 10-19 take 150, jobs 20-29 take 170
        for i in range(10):
            self.jobs.append(Job.Job(i, 120, 1))
        for i in range(10, 20):
            self.jobs.append(Job.Job(i, 150, 1))
        for i in range(20, 30):
            self.jobs.append(Job.Job(i, 170, 1))

        # five identical clusters
        for i in range(5):
            self.clusters.append(Cluster.Cluster(i, 1000, 64))

        self.n = len(self.jobs)
        self.m = len(self.clusters)

        self.bestSolution = [None] * self.n

        # build each row independently: multiplying a list of lists ([...] * n)
        # would make every row alias the same inner list
        self.heuristic = [[None] * (self.m + 1) for _ in range(self.n)]
        for i in range(self.n):
            for j in range(self.m):
                self.heuristic[i][j] = 2
            self.heuristic[i][self.m] = 0.1

        self.pheromone = [[self.pheremoneInitialVal] * (self.m + 1)
                          for _ in range(self.n)]

        for i in range(self.numOfAnts):
            self.ants.append(Ant.Ant(self.n, self.m))
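A short demonstration of why the list comprehensions above matter: multiplying a list of lists copies references, so a per-cell pheromone update would write through to every row.

grid = [[0] * 3] * 2                # both rows alias the same inner list
grid[0][0] = 1
print(grid)                         # [[1, 0, 0], [1, 0, 0]]

grid = [[0] * 3 for _ in range(2)]  # independent rows
grid[0][0] = 1
print(grid)                         # [[1, 0, 0], [0, 0, 0]]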
Example No. 26
 def halt(self):
     print("\nScheduler has been halted:")
     # Update the min with the remaining length
     current_key, current_job = self.__jobHeap.remove_min()
     self.__jobHeap.add((current_key[0], current_job.get_length()), current_job)
     
     # tmpHeap stores the job that is removed from the jobHeap
     tmpHeap = AdaptableHeapPriorityQueue()
     # nameDict is a dictionary that stores job names as keys and locators as values
     nameDict = {}
     # listJobs is a python list used to sort jobs
     listJobs = []        
     # prompt the user for how the table should be sorted
     srt = input("Sort by (j/p/l/h): ")
     print(self)
     try:
         while True:
             # Remove from jobHeap and print the job data if heap order is selected
             tmpKey, tmpJob = self.__jobHeap.remove_min()
             name = tmpJob.get_name()
             priority = tmpJob.get_priority()
             length = tmpJob.get_length()
             if(srt == 'h'):
                 print(tmpJob)
             # Add the job from the jobHeap to the tmpHeap
             nameDict[name] = tmpHeap.add(tmpKey, tmpJob)
             # Add the job to the listJobs
             listJobs.append((name, priority, length))
     except Empty:
         pass
     
     # Print the jobs in the order the user selected (j = name, p = priority, l = length)
     sort_index = {'j': 0, 'p': 1, 'l': 2}
     if srt in sort_index:
         for j in sorted(listJobs, key=lambda jobs: jobs[sort_index[srt]]):
             print('{0:<20} {1:>10} {2:>10} '.format(j[0], j[1], j[2]))
     
     # Add new job
     newJob = input("New job? (y/n): ")
     if newJob == 'y':
         name = input("New job name: ")
         priority = int(input("New job priority: "))
         length = int(input("New job length: "))
         newJob = Job(name, priority, length)
         tmpHeap.add((priority, length), newJob)
     # Alter a job
     alter = input("Alter priority? (y/n): ")
     if alter == 'y':
         name = input("Job name: ")
         priority = int(input("New priority: "))
         tmpKey, tmpJob = tmpHeap.remove(nameDict[name])
         tmpJob.set_priority(priority)
         tmpHeap.add((priority, tmpKey[1]), tmpJob)
     # Restart the scheduler
     print("Restarting scheduler...")
     self.__jobHeap = tmpHeap
Example No. 27
    def prepare_jobs(self):
        self.jobs = list()

        if len(self.input_files) > 2:
            raise ExtractBpFromSamRefJobGroupException(
                " Found too many sam files:" + "\n".join(self.input_files))

        arguments = " -1 " + self.input_files[0]

        if len(self.input_files) == 2:
            arguments += " -2 " + self.input_files[1]

        arguments += " -o " + self.output_directory

        command = self.executable + arguments

        job = Job.Job(command=command,
                      output_directory=self.cluster_out_directory,
                      job_name="bp_from_bed_ref",
                      time_limit=self.run_time,
                      cores=1,
                      memory=self.memory)
        self.jobs.append(job)

        if self.individual_lib_directory != "":
            library_directories = glob.glob(self.individual_lib_directory +
                                            "/*")
            library_names = map(os.path.basename, library_directories)
            self.individual_output_directory = os.path.join(
                self.output_directory, "individual_libraries")
            os.makedirs(self.individual_output_directory, exist_ok=True)

            for library_directory in library_directories:
                library_name = os.path.basename(library_directory)
                this_output_directory = os.path.join(
                    self.individual_output_directory, library_name)
                os.makedirs(this_output_directory, exist_ok=True)

                if len(self.input_files) == 1:
                    sam_file = os.path.join(library_directory, 'alignment.sam')
                    if not os.path.isfile(sam_file):
                        raise (
                            FileNotFoundError("Could not find the sam file " +
                                              sam_file))
                    arguments = " -1 " + sam_file + " -o " + this_output_directory

                elif len(self.input_files) == 2:
                    sam_file_1 = os.path.join(library_directory,
                                              'alignment_1.sam')
                    sam_file_2 = os.path.join(library_directory,
                                              'alignment_2.sam')
                    if not os.path.isfile(sam_file_1):
                        raise (
                            FileNotFoundError("Could not find the sam file " +
                                              sam_file_1))
                    if not os.path.isfile(sam_file_2):
                        raise (
                            FileNotFoundError("Could not find the sam file " +
                                              sam_file_2))

                    arguments = " -1 " + sam_file_1 + " -2 " + sam_file_2 + " -o " + this_output_directory

                command = self.executable + arguments
                job = Job.Job(command=command,
                              output_directory=self.cluster_out_directory,
                              job_name="bp_from_bed_ref",
                              time_limit=self.run_time,
                              cores=1,
                              memory=self.memory)
                self.jobs.append(job)
Example No. 28
    def prepare_jobs(self):

        self.jobs = list()

        if self.alignment_mode == "single":
            for directory in self.input_files:
                arranged_input_files = self.arrange_input_files(directory)

                for file in arranged_input_files:

                    input_fastq = file.strand_1_file
                    output_directory = os.path.join(
                        self.output_directory, os.path.basename(directory),
                        file.name)
                    success_file = os.path.join(output_directory,
                                                self.run_success_file_name)
                    if os.path.isfile(success_file):
                        continue
                    os.makedirs(output_directory, exist_ok=True)
                    summary_file = os.path.join(output_directory,
                                                "%s.summary" % file.name)
                    command = "{executable} -p {threads} {arguments} -x {reference} -o {output_directory} "\
                              " -U {input} &> {summary}".format(executable = self.executable ,\
                                                   threads          = self.run_threads ,\
                                                   arguments        = self.arguments ,\
                                                   output_directory = output_directory,\
                                                   reference        = self.reference ,\
                                                   input            = input_fastq ,\
                                                   summary          = summary_file )

                    job = Job.Job(command=command,
                                  output_directory=self.cluster_out_directory,
                                  job_name=file.name,
                                  time_limit=self.run_time,
                                  cores=self.run_threads,
                                  memory=self.memory)
                    self.jobs.append(job)
        else:
            for directory in self.input_files:
                arranged_input_files = self.arrange_input_files(directory)
                for file in arranged_input_files:
                    input_1_fastq = file.strand_1_file
                    input_2_fastq = file.strand_2_file
                    output_directory = os.path.join(
                        self.output_directory, os.path.basename(directory),
                        file.name)
                    success_file = os.path.join(output_directory,
                                                self.run_success_file_name)
                    if os.path.isfile(success_file):
                        continue
                    os.makedirs(output_directory, exist_ok=True)
                    summary_file = os.path.join(output_directory,
                                                "%s.summary" % file.name)
                    command = "{executable} -p {threads} {arguments} -x {reference} -o {output_directory} "\
                              " -1 {input_1} -2 {input_2}  &> {summary}".format(executable = self.executable ,\
                                                   threads          = self.run_threads ,\
                                                   arguments        = self.arguments ,\
                                                   reference        = self.reference ,\
                                                   output_directory = output_directory,\
                                                   input_1          = input_1_fastq ,\
                                                   input_2          = input_2_fastq ,\
                                                   summary          = summary_file )

                    job = Job.Job(command=command,
                                  output_directory=self.cluster_out_directory,
                                  job_name=file.name,
                                  time_limit=self.run_time,
                                  cores=self.run_threads,
                                  memory=self.memory)
                    self.jobs.append(job)
Example No. 29
                              reverse=True))

cncs = np.array(cncs)
cncs_forging = CNC_shape.getForgingCNC(cncs)
cncs_hex = CNC_shape.getHexCNC(cncs)
cncs_valve = CNC_shape.getValveCNC(cncs)
cncs_round = CNC_shape.getRoundCNC(cncs)
cncs_square = CNC_shape.getSquareCNC(cncs)
cncs_lok_forging = CNC_shape.getLokfittingForging(cncs)  # 1,2,3,2M,3M,4M forging
cncs_lok_hex = CNC_shape.getLokfittingHex(cncs)  # 1,2,3,2M,3M,4M hex
cncs_3941 = CNC_shape.get3941cnc(cncs)
cncs_4042 = CNC_shape.get4042cnc(cncs)

sorted_jobs = np.array(sorted_jobs)

temp_job = Job.Job()

for jobs in sorted_jobs:
    idx = 0
    for job in jobs:
        if (job.getRawmaterialGubun() == 1 and job.getLOKFitting() == 'Y'
                and job.getLOKFittingSize() == 'N'):
            if (10 < job.getSpec() and 20 > job.getSpec()):
                temp_job = copy.deepcopy(job)
                job.setProcessCd(1)
                temp_job.setProcessCd(2)
                cncs_3941[0].setReservedTime(cncs_3941[0].getReservedTime() +
                                             job.getRequiredTime()[0] +
                                             interval_time)
                cncs_3941[0].jobAssign(job)
                cncs_3941[1].setReservedTime(cncs_3941[1].getReservedTime() +
Example No. 30
   def prepare_jobs(self):

      self.jobs = list()

      mate_1_option = " --norc "
      mate_2_option = " --nofw "

      if self.rna_strandness == 'R':
         mate_1_option = " --nofw "
         mate_2_option = " --norc "


      for directory in self.input_files:
          subdirectories = get_directories(directory)
          library_name = os.path.basename(directory)

          for subdir in subdirectories:
              arranged_input_files = self.arrange_input_files(
                  os.path.join(subdir, "hisat")  )
              file = arranged_input_files[0]
              input_fastq              = file.strand_1_file
              output_directory_base    = os.path.join(self.libraries_directory ,
                                                      os.path.basename(directory),
                                                      os.path.basename(subdir) )

              if self.alignment_mode == "single":
                  output_directory_1 = os.path.join(output_directory_base, 'align_bp_single_end')
                  success_file_1 = os.path.join(output_directory_1, self.run_success_file_name)
                  if os.path.isfile(success_file_1):
                      continue
              else:
                  output_directory_1 = os.path.join(output_directory_base, 'mate_1')
                  output_directory_2 = os.path.join(output_directory_base, 'mate_2')
                  os.makedirs(output_directory_2, exist_ok=True )
                  self.sam_files_2[library_name].append(os.path.join(output_directory_2, "alignment.sam"))
                  success_file_1 = os.path.join(output_directory_1, self.run_success_file_name)
                  success_file_2 = os.path.join(output_directory_2, self.run_success_file_name)
                  if os.path.isfile(success_file_1) and os.path.isfile(success_file_2):
                      continue
                  summary_file_2             = os.path.join(output_directory_2,
                                                            "%s.pipeline_summary"%file.name)


              self.sam_files_1[library_name].append(os.path.join(output_directory_1, "alignment.sam"))
              os.makedirs(output_directory_1, exist_ok=True )

              summary_file_1             = os.path.join(output_directory_1,
                                                        "%s.pipeline_summary"%file.name)
              command_1 = "{executable} -a \\\" {arguments} \\\" -x {reference} -o {output_directory} " \
                          "-p {threads} -U {input_fastq} &> {summary}".format(
                  executable = self.executable,
                  arguments = self.arguments + mate_1_option,
                  reference = self.reference,
                  output_directory = output_directory_1,
                  threads = self.run_threads,
                  input_fastq = input_fastq,
                  summary = summary_file_1
              )

              job_1 = Job.Job(command          = command_1 ,
                              output_directory = self.cluster_out_directory ,
                              job_name         = file.name,
                              time_limit       = self.run_time ,
                              cores            = self.run_threads ,
                              memory           = self.memory)

              self.jobs.append(job_1)

              if self.alignment_mode == "paired":
                  input_fastq_2              = file.strand_2_file

                  command_2 = "{executable} -a \\\" {arguments}\\\" -x {reference} -o {output_directory} " \
                              "-p {threads} -U {input_fastq} &> {summary}".format(
                      executable = self.executable,
                      arguments = self.arguments + mate_2_option,
                      reference = self.reference,
                      output_directory = output_directory_2,
                      threads = self.run_threads,
                      input_fastq = input_fastq_2,
                      summary = summary_file_2
                  )

                  job_2 = Job.Job(command          = command_2 ,
                                  output_directory = self.cluster_out_directory ,
                                  job_name         = file.name + "_2",
                                  time_limit       = self.run_time ,
                                  cores            = self.run_threads ,
                                  memory           = self.memory)

                  self.jobs.append(job_2)