Example #1 (Python 2)
    def start_job_monitor(self):
        try:
            # poll the job model for newly submitted jobs every 10 seconds
            while True:
                newjobids = jobmodel_helper.check_new_jobs()
                # guard against an empty list before checking the "nojobs" sentinel
                if newjobids and newjobids[0] != "nojobs":
                    print "new jobs\n \n \n"
                    for jobid in newjobids:
                        self.start_job(jobid)
                time.sleep(10)

        except Exception, e:
            print 'Unable to process in worker thread: ' + str(e)
Example #2 (Python 3)
    def start_job_monitor(self):
        try:
            # poll the job model for newly submitted jobs every 10 seconds
            while True:
                newjobids = jobmodel_helper.check_new_jobs()
                # guard against an empty list before checking the "nojobs" sentinel
                if newjobids and newjobids[0] != "nojobs":
                    print("new jobs\n \n \n")
                    for jobid in newjobids:
                        self.start_job(jobid)
                time.sleep(10)

        except Exception as e:
            print('Unable to process in worker thread: ' + str(e))
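Both variants of start_job_monitor block forever, so a caller would normally run the loop on a background thread. A minimal sketch of that wiring (JobMonitor is a hypothetical owner of the method above, not a name from the original source):

    import threading

    monitor = JobMonitor()  # hypothetical class that defines start_job_monitor()
    t = threading.Thread(target=monitor.start_job_monitor)
    t.daemon = True  # do not keep the process alive just for the poll loop
    t.start()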
Example #3 (Python 2)
    def start_job(self, jobid):
        print "running job with id %s" % jobid
        jobid = str(jobid)
        working_directory = os.path.join(DARE_HOME, "darefiles", jobid)
        conffile = os.path.join(working_directory, jobid + "-job.cfg")
        # create the working directory if it does not exist yet
        if not os.path.isdir(working_directory):
            os.makedirs(working_directory)
        self.prepare_conf(jobid, conffile)
        jobmodel_helper.update_job_status(jobid, 2)

        # start the DARE run: pick the script that matches the job type
        job_type = jobmodel_helper.get_jobtype(jobid)
        if job_type == "tophatfusion":
            exec_file = "tophatfusion_dare.py"
        elif job_type == "chipseq":
            mapping_tool = jobmodel_helper.get_mapping_tool(jobid)
            print "mapping tool \n\n\n", mapping_tool
            if mapping_tool == "BOWTIE":
                exec_file = "scalable_chipseq_2_dare.py"
            else:
                exec_file = "scalable_chipseq_dare.py"
        else:
            exec_file = "bfast_dare.py"

        # launch the job script detached and record its pid
        p1 = Popen(["python", os.path.join(DARE_HOME, "examples", exec_file), "-c", conffile], stdout=PIPE)
        print "p1.pid", p1.pid
        jobmodel_helper.update_job_pid(jobid, p1.pid)

        #sts = os.waitpid(p1.pid, 0)[1]

        #update_job_status(jobid, 4)

        print '\ncompleted starting job %s' % jobid
Example #4 (Python 3)
    def start_job(self, jobid):
        print("running job with id %s" % jobid)
        jobid = str(jobid)
        working_directory = os.path.join(DARE_HOME, "darefiles", jobid)
        conffile = os.path.join(working_directory, jobid + "-job.cfg")
        # create the working directory if it does not exist yet
        os.makedirs(working_directory, exist_ok=True)
        self.prepare_conf(jobid, conffile)
        jobmodel_helper.update_job_status(jobid, 2)

        # start the DARE run: pick the script that matches the job type
        job_type = jobmodel_helper.get_jobtype(jobid)
        if job_type == "tophatfusion":
            exec_file = "tophatfusion_dare.py"
        elif job_type == "chipseq":
            mapping_tool = jobmodel_helper.get_mapping_tool(jobid)
            print("mapping tool \n\n\n", mapping_tool)
            if mapping_tool == "BOWTIE":
                exec_file = "scalable_chipseq_2_dare.py"
            else:
                exec_file = "scalable_chipseq_dare.py"
        else:
            exec_file = "bfast_dare.py"

        # launch the job script detached and record its pid
        p1 = Popen(["python", os.path.join(DARE_HOME, "examples", exec_file), "-c", conffile], stdout=PIPE)
        print("p1.pid", p1.pid)
        jobmodel_helper.update_job_pid(jobid, p1.pid)

        #sts = os.waitpid(p1.pid, 0)[1]

        #update_job_status(jobid, 4)

        print('\ncompleted starting job %s' % jobid)
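One caveat with the launch above: stdout=PIPE is passed but the pipe is never read, so a chatty child process can eventually block once the OS pipe buffer fills. A hedged alternative, sketched here with the names from the example (the log-file location is an assumption), redirects output to a per-job log and checks on the child later with poll():

    import os
    from subprocess import Popen

    logpath = os.path.join(working_directory, jobid + "-run.log")  # assumed log location
    with open(logpath, "w") as logfile:
        p1 = Popen(["python", os.path.join(DARE_HOME, "examples", exec_file),
                    "-c", conffile], stdout=logfile, stderr=logfile)

    # later (e.g. from the monitor loop): poll() returns None while still running
    if p1.poll() is not None:
        print("job %s exited with code %s" % (jobid, p1.returncode))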
Example #5 (Python 2)
    def prepare_conf(self, jobid, conffile):
        # write a per-job INI file with a single [JOB] section
        config = ConfigParser.ConfigParser()
        newjobinfo = jobmodel_helper.get_jobinfo(jobid)

        section_name = 'JOB'
        config.add_section(section_name)
        config.set(section_name, "jobid", jobid)
        config.set(section_name, "dare_home", DARE_HOME)
        for jobinfo in newjobinfo:
            config.set(section_name, jobinfo.key, jobinfo.value)

        # "with" guarantees the file is closed even if write() raises
        with open(conffile, 'w') as cfgfile:
            config.write(cfgfile)
Example #6 (Python 3)
    def prepare_conf(self, jobid, conffile):
        # write a per-job INI file with a single [JOB] section
        config = configparser.ConfigParser()
        newjobinfo = jobmodel_helper.get_jobinfo(jobid)

        section_name = 'JOB'
        config.add_section(section_name)
        config.set(section_name, "jobid", jobid)
        config.set(section_name, "dare_home", DARE_HOME)
        for jobinfo in newjobinfo:
            # Python 3's configparser only accepts string values
            config.set(section_name, jobinfo.key, str(jobinfo.value))

        # "with" guarantees the file is closed even if write() raises
        with open(conffile, 'w') as cfgfile:
            config.write(cfgfile)
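The file prepare_conf writes is a standard INI file, so downstream scripts can load it back with the same module. A short round-trip sketch (Python 3 spelling; the section and key names match the example above):

    import configparser

    config = configparser.ConfigParser()
    config.read(conffile)
    jobid = config.get("JOB", "jobid")
    dare_home = config.get("JOB", "dare_home")
    print(jobid, dare_home)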
Example #7 (Python 2)
    def run(self):

        # create a logfile
        LOG_FILENAME = self.job_conf["log_filename"]
        print LOG_FILENAME
        self.logger = logging.getLogger("dare_multijob")
        hdlr = logging.FileHandler(LOG_FILENAME)
        formatter = logging.Formatter("%(asctime)s %(levelname)s %(message)s")
        hdlr.setFormatter(formatter)
        self.logger.addHandler(hdlr)
        self.logger.setLevel(logging.INFO)

        # first info in log file
        self.logger.info("Job id is " + self.job_conf["jobid"])
        self.logger.info("RESOURCES used are " + self.job_conf["num_resources"])

        try:
            # get resource info and start the big job agents
            resource_list = []
            self.mjs = []
            for i in range(int(self.job_conf["num_resources"])):
                resource_list.append([dict_section(self.config, "resource_" + str(i))])
                # create one manyjob service per resource
                print "Create manyjob service"
                self.mjs.append(many_job.many_job_service(resource_list[i], None))
            total_number_of_jobs = 0

            ### run the steps
            wus_count = 0
            for STEP in range(int(self.job_conf["num_steps"])):
                starttime = time.time()

                # job started, update status
                if self.load_update_env == "true":
                    jobmodel_helper.update_job_detail_status(self.job_conf["jobid"], "In step " + str(STEP + 1))

                step_wus = self.job_conf["step_" + str(STEP)].split(",")
                if ("step_" + str(STEP)) not in self.job_conf["ft_steps"].split(","):
                    ### submit each of the step's wus to bigjob
                    for step_wu in step_wus:
                        wu = dict_section(self.config, step_wu.strip())
                        wus_count = wus_count + 1
                        self.submit_wu(wu)
                    self.wait_for_wus(wus_count)
                else:
                    # time.sleep(10)
                    for step_wu in step_wus:
                        fs = dict_section(self.config, step_wu.strip())
                        self.submit_fs(fs)

                runtime = time.time() - starttime
                self.logger.info("STEP" + str(STEP) + " Runtime: " + str(runtime))

            # all jobs done, update status
            if self.load_update_env == "true":
                jobmodel_helper.update_job_detail_status(self.job_conf["jobid"], "")
                jobmodel_helper.update_job_status(self.job_conf["jobid"], 4)

            for i in range(int(self.job_conf["num_resources"])):
                self.mjs[i].cancel()

        except Exception:
            traceback.print_exc(file=sys.stdout)
            # best-effort cleanup of the manyjob services before giving up
            try:
                for i in range(int(self.job_conf["num_resources"])):
                    self.mjs[i].cancel()
            except Exception:
                sys.exit()
Example #8 (Python 3)
    def run(self):

        # create a logfile
        LOG_FILENAME = self.job_conf["log_filename"]
        print(LOG_FILENAME)
        self.logger = logging.getLogger('dare_multijob')
        hdlr = logging.FileHandler(LOG_FILENAME)
        formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
        hdlr.setFormatter(formatter)
        self.logger.addHandler(hdlr)
        self.logger.setLevel(logging.INFO)

        # first info in log file
        self.logger.info("Job id is " + self.job_conf["jobid"])
        self.logger.info("RESOURCES used are " + self.job_conf["num_resources"])

        try:
            # get resource info and start the big job agents
            resource_list = []
            self.mjs = []
            for i in range(int(self.job_conf["num_resources"])):
                resource_list.append([dict_section(self.config, "resource_" + str(i))])
                # create one manyjob service per resource
                print("Create manyjob service")
                self.mjs.append(many_job.many_job_service(resource_list[i], None))
            total_number_of_jobs = 0

            ### run the steps
            wus_count = 0
            for STEP in range(int(self.job_conf["num_steps"])):
                starttime = time.time()

                # job started, update status
                if self.load_update_env == "true":
                    jobmodel_helper.update_job_detail_status(
                        self.job_conf["jobid"], "In step " + str(STEP + 1))

                step_wus = self.job_conf["step_" + str(STEP)].split(',')
                if ("step_" + str(STEP)) not in self.job_conf["ft_steps"].split(','):
                    ### submit each of the step's wus to bigjob
                    for step_wu in step_wus:
                        wu = dict_section(self.config, step_wu.strip())
                        wus_count = wus_count + 1
                        self.submit_wu(wu)
                    self.wait_for_wus(wus_count)
                else:
                    # time.sleep(10)
                    for step_wu in step_wus:
                        fs = dict_section(self.config, step_wu.strip())
                        self.submit_fs(fs)

                runtime = time.time() - starttime
                self.logger.info("STEP" + str(STEP) + " Runtime: " + str(runtime))

            # all jobs done, update status
            if self.load_update_env == "true":
                jobmodel_helper.update_job_detail_status(self.job_conf["jobid"], "")
                jobmodel_helper.update_job_status(self.job_conf["jobid"], 4)

            for i in range(int(self.job_conf["num_resources"])):
                self.mjs[i].cancel()

        except Exception:
            traceback.print_exc(file=sys.stdout)
            # best-effort cleanup of the manyjob services before giving up
            try:
                for i in range(int(self.job_conf["num_resources"])):
                    self.mjs[i].cancel()
            except Exception:
                sys.exit()
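run() leans on a dict_section helper that is not shown in these examples. A minimal sketch consistent with how it is called, flattening one ConfigParser section into a plain dict, might look like this (an assumption about the helper, not its original implementation):

    def dict_section(config, section_name):
        # turn a single ConfigParser section into {option: value}
        return dict(config.items(section_name))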