Example #1
    def nextJob(self):
        if self.currentParametersIndex >= len(self.parameterSets):
            return

        params = self.parameterSets[self.currentParametersIndex]
        self.currentParametersIndex += 1

        # skip the job if an identical parameter set has already been processed
        paramsHash = getParamSetHash(params, self.strategy.copasiConfig["params"], not self.areParametersChangeable)
        if paramsHash in self.strategy.startedJobs:
            g.log(LOG_DEBUG, "skipping a job, parameter set already processed: {}".format(params))
            return
        self.strategy.startedJobs.add(paramsHash)

        numFreeCores = self.numUsableCores
        with self.jobLock:
            for j in self.activeJobs:
                numFreeCores -= j.maxCores
        numFreeCores = max(1, numFreeCores)

        # setup a new job
        j = job.Job(self,
                    params, # the set of parameters
                    min(numFreeCores, self.numRunnersPerJob), # the number of simultaneous processes
                    self.areParametersChangeable) # whether to enable optimization

        # add it to the list of active jobs
        with self.jobLock:
            self.activeJobs.append(j)

        # execute the job
        if not j.execute(g.workDir, self.strategy.copasiFile):
            g.log(LOG_DEBUG, "failed to execute {}".format(j.getName()))
            self.finishJob(j)
Example #2
    def datadir(self):
        """Returns the datadir of this LilyPond instance.

        Usually this is something like "/usr/share/lilypond/2.13.3/".
        If this method returns False, the datadir could not be determined.

        """
        if not self.abscommand():
            return False

        # First ask LilyPond itself.
        j = job.Job([self.abscommand(), '-e',
            "(display (ly:get-option 'datadir)) (newline) (exit)"])
        @j.done.connect
        def done():
            success = j.success
            if success:
                output = [line[0] for line in j.history()]
                d = output[1].strip('\n')
                if os.path.isabs(d) and os.path.isdir(d):
                    self.datadir = d
                    return

            # Then find out via the prefix.
            if self.prefix():
                dirs = ['current']
                if self.versionString():
                    dirs.append(self.versionString())
                for suffix in dirs:
                    d = os.path.join(self.prefix(), 'share', 'lilypond', suffix)
                    if os.path.isdir(d):
                        self.datadir = d
                        return
            self.datadir = False
        app.job_queue().add_job(j, 'generic')
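For context, a hedged usage sketch of the property above: `lp` stands for a hypothetical LilyPond-info object, read after the asynchronous job has resolved the value.

d = lp.datadir  # `lp` is hypothetical; value is set by the job callback above
if d is False:
    print("datadir could not be determined")
else:
    print("LilyPond data files live under", d)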
Example #3
def makeConfig():
    logger_obj.write_log('Making config file')
    job_obj = job.Job(logger_obj, partition_size, path_to_file,
                      path_to_bitmat_file, file_name)
    lines = [
        'BITMATDUMPFILE_SPO=database/uniprot_800m_spo_pdump',
        'BITMATDUMPFILE_OPS=database/uniprot_800m_ops_pdump',
        'BITMATDUMPFILE_PSO=database/uniprot_800m_pso_sdump',
        'BITMATDUMPFILE_POS=database/uniprot_800m_pos_odump',
        'RAWDATAFILE_SPO=' + path_to_bitmat_file_config + '_spo',
        'RAWDATAFILE_OPS=' + path_to_bitmat_file_config + '_ops',
        'RAWDATAFILE_PSO=' + path_to_bitmat_file_config + '_pso',
        'RAWDATAFILE_POS=' + path_to_bitmat_file_config + '_pos',
        'RAWDATAFILE=' + path_to_bitmat_file_config,
        'NUM_SUBJECTS=' + job_obj.get_count(path_to_file + '-sub-all'),
        'NUM_PREDICATES=' + job_obj.get_count(path_to_file + '-pre-all'),
        'NUM_OBJECTS=' + job_obj.get_count(path_to_file + '-obj-all'),
        'NUM_COMMON_SO=' + job_obj.get_count(path_to_file + '-common'),
        'ROW_SIZE_BYTES=4',
        'GAP_SIZE_BYTES=4',
        'TMP_STORAGE=output',
        'TABLE_COL_BYTES=5',
        'COMPRESS_FOLDED_ARRAY=0',
    ]
    with open(config_file, 'w') as file_obj:
        file_obj.write('\n'.join(lines) + '\n')
    logger_obj.write_log('Finished making config file')
Example #4
 def getJob(self):
     """Return the job this proc is running.
     @rtype:  Job
     @return: The job this proc is running."""
     response = self.stub.GetJob(host_pb2.ProcGetJobRequest(proc=self.data),
                                 timeout=Cuebot.Timeout)
     return job.Job(response.job)
Example #5
    def delete_job(self, name, title):
        self.__list_job.clear()
        with open(FILENAME_JOB, "r") as file:
            reader_csv = csv.reader(file)
            for row in reader_csv:
                if row != [] and (row[5] != name or row[0] != title):
                    self.__list_job.append(
                        j.Job(row[0], row[1], row[2], row[3], row[4], row[5]))

        with open(FILENAME_JOB, "w") as file:
            writer_csv = csv.writer(file)
            for element in self.__list_job:
                writer_csv.writerow(
                    (element.get_title(), element.get_description(),
                     element.get_employer(), element.get_location(),
                     element.get_salary(), element.get_post_name()))

        # also delete the rows in save_job.csv that relate to the deleted job
        print(title)
        self.__list_save_job.clear()
        with open(FILENAME_SAVE_JOB, "r") as file:
            reader_csv = csv.reader(file)
            for row in reader_csv:
                if row != [] and row[1] != title:
                    self.__list_save_job.append(sa.Save(row[0], row[1]))

        with open(FILENAME_SAVE_JOB, "w") as file:
            writer_csv = csv.writer(file)
            for element in self.__list_save_job:
                writer_csv.writerow(
                    (element.get_username(), element.get_title()))
Example #6
    def new_job(self, post_name):
        manage = Manage()

        title = input("Enter Title: ")
        description = input("Enter Description: ")
        employer = input("Enter Employer: ")
        location = input("Enter Location: ")
        salary = input("Salary: ")

        #add this job to FILENAME_NEW_JOB to notify users
        temp_entry = list()
        temp_entry.append(title)
        for user in all_users():
            temp_entry.append(user)
        with open(FILENAME_NEW_JOB, "a") as file:
            writer_csv = csv.writer(file)
            writer_csv.writerow(temp_entry)

        p = check.Input_Value()
        # validate that the salary is a positive number
        while not p.isNumber(salary) or float(salary) <= 0:
            print("\nThe salary should be a positive number. Try again!")
            salary = input("Salary: ")

        job = j.Job(title, description, employer, location, salary, post_name)
        return manage.add_job(
            job, post_name
        )  # return the name of the user who posted the job; we could return a tuple here
Example #7
 def test_Jobs(self):  #adds jobs so I can test the addition
     filename = "job_data.csv"
     f = open(filename, "w+")
     f.close()
     manage = ma.Manage()
     job1 = jo.Job("Porter", "TransportsItems", "Bridges", "UCA", "10",
                   "TP1")
     assert manage.add_job(job1, "TP1") == job1.get_post_name()
     job2 = jo.Job("Developer", "WritesCode", "USF", "FL", "20", "TP2")
     assert manage.add_job(job2, "TP2") == job2.get_post_name()
     job3 = jo.Job("Tester1", "Tests", "USF", "FL", "20", "DP")
     assert manage.add_job(job3, "DP") == job3.get_post_name()
     job4 = jo.Job("Tester2", "Tests", "USF", "FL", "20", "KP")
     assert manage.add_job(job4, "KP") == job4.get_post_name()
     job5 = jo.Job("SCRUMMaster", "ManagesSCRUM", "USF", "FL", "30", "YQ")
     assert manage.add_job(job5, "YQ") == job5.get_post_name()
Example #8
 def midi2wav(self, midfile, wavfile):
     """Run timidity to convert the MIDI to WAV."""
     self.wavfile = wavfile  # we may need to clean it up later...
     j = job.Job()
     j.decoder_stdout = j.decoder_stderr = codecs.getdecoder('utf-8')
     j.command = ["timidity", midfile, "-Ow", "-o", wavfile]
     self.run_job(j)
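A one-line usage sketch; the `converter` instance and file names are made up for illustration.

converter.midi2wav("song.mid", "song.wav")  # hypothetical instance and paths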
Example #9
 def _get_page_jobs(self, url, last_update):
     jobs = []
     t = gevent.spawn(requests.get, url)
     t.join()
     res = t.value
     bs = BeautifulSoup(res.content)
     table = bs.findAll("table", attrs = {"class":"tablelist"})[0]
     trs = table.findAll("tr", attrs = {"class":"even"})
     trs.extend(table.findAll("tr", attrs = {"class":"odd"}))
     for tr in trs:
         tds = tr.findChildren("td")
         publish_date = time.strptime(tds[-1].text, "%Y-%m-%d")
         last_update_date = time.strptime(last_update, "%Y-%m-%d")
         if publish_date < last_update_date:
             continue
         link = ""
         for attr in tds[0].a.attrs:
             if "href" in attr:
                 link = attr[1]
         single_job_url = self.root_url+link
         detail = self._get_single_job(single_job_url)
         d = job.Job()
         d.link = single_job_url
         d.pub_time = tds[-1].text
         d.location = tds[-2].text
         d.number = tds[-3].text
         d.title = tds[0].text.replace("&nbsp;", "")
         d.detail = detail
         jobs.append(d)
         print "%s%s%s" % (self.__class__.__name__,
                 ": get job info from ",
                 single_job_url)
     return jobs
Example #10
def main():
    global logger
    cl_opts = parse_command_line()
    log.setup(cl_opts.job_dir)
    logger = log.logger()
    logger.info('=== New session ===')
    j = job.Job(cl_opts.job_dir, cl_opts.restart)
    j.send_all()
Example #11
 def ordonnancer_liste_job(self, liste_jobs):
     for J in liste_jobs:
         self.ordonnancer_job(J)
     nv_liste_jobs = []
     for J in liste_jobs:
         NJ = job.Job(J.numero(), J.duree_op)
         nv_liste_jobs.append(NJ)
     self.dur = self.date_disponibilite(self.nb_machines - 1)
Example #12
 def getJobs(self):
     """Returns the jobs in this group
     @rtype:  list<Job>
     @return: List of jobs in this group"""
     response = self.stub.GetJobs(
         job_pb2.GroupGetJobsRequest(group=self.data),
         timeout=Cuebot.Timeout)
     return [job.Job(j) for j in response.jobs]
Example #13
 def loadJobsFromFile(self, file_name="jobs.json"):
     # id, name, projects_count
     with open(file_name, "r") as file:
         jobs = json.load(file)
     for giob in jobs:
         self.jobs[giob['id']] = job.Job(giob['id'], giob['name'],
                                         giob['projects_count'],
                                         giob['seo_url'])
     return jobs
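A hedged usage sketch for the loader above; the `registry` object and the file contents are illustrative assumptions.

# jobs.json is assumed to hold entries shaped like:
# [{"id": 1, "name": "plumber", "projects_count": 3, "seo_url": "plumber"}]
loaded = registry.loadJobsFromFile("jobs.json")  # `registry` is hypothetical
print(len(loaded), "jobs loaded")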
Example #14
def create_new_jobs(quantity_by_position_id):
    # creates all new jobs with corresponding position_ids based on user input
    new_jobs = []
    for (position_id, quantity) in quantity_by_position_id.items():
        for _ in range(quantity):
            new_jobs.append(job.Job(random.randint(2000, 100000), position_id))

    # return list of job objects
    return new_jobs
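A quick usage sketch for create_new_jobs(); the position ids and quantities are made up for illustration.

quantity_by_position_id = {7: 2, 12: 1}  # hypothetical positions and openings
jobs = create_new_jobs(quantity_by_position_id)
assert len(jobs) == 3  # one Job object per requested opening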
Example #15
    def delete_job(self, name, title):
        self.__list_job.clear()
        with open(FILENAME_JOB, "r") as file:
            reader_csv = csv.reader(file)
            for row in reader_csv:
                if row != [] and (row[5] != name or row[0] != title):
                    self.__list_job.append(
                        j.Job(row[0], row[1], row[2], row[3], row[4], row[5]))

        with open(FILENAME_JOB, "w") as file:
            writer_csv = csv.writer(file)
            for element in self.__list_job:
                writer_csv.writerow(
                    (element.get_title(), element.get_description(),
                     element.get_employer(), element.get_location(),
                     element.get_salary(), element.get_post_name()))

        # also delete the rows in save_job.csv that relate to the deleted job
        print(title)
        self.__list_save_job.clear()
        with open(FILENAME_SAVE_JOB, "r") as file:
            reader_csv = csv.reader(file)
            for row in reader_csv:
                if row != [] and row[1] != title:
                    self.__list_save_job.append(sa.Save(row[0], row[1]))

        with open(FILENAME_SAVE_JOB, "w") as file:
            writer_csv = csv.writer(file)
            for element in self.__list_save_job:
                writer_csv.writerow(
                    (element.get_username(), element.get_title()))

        # delete the applications related to the job and notify the applicants
        self.__list_job.clear()

        # names of those who need to be notified about the deleted job
        notify_applicants = list()

        with open(FILENAME_APP, "r") as file:
            reader_csv = csv.reader(file)
            for row in reader_csv:
                if (row != []) and (row[1] == title):
                    notify_applicants.append(row)
                elif (row != []):
                    self.__list_job.append(row)

        with open(FILENAME_APP, "w") as file:
            writer_csv = csv.writer(file)
            for element in self.__list_job:
                writer_csv.writerow(element)

        with open(FILENAME_DEL_JOB, "a") as file:
            writer_csv = csv.writer(file)
            for element in notify_applicants:
                writer_csv.writerow(element)

        write_jobs()
Example #16
def spawn():
    """
    Spawns a mocked job representing a heavy task. The job runs on a
    Python thread and publishes its progress to a Redis channel
    (Pub/Sub mechanism).
    """
    j = job.Job()
    j.start()

    return str(j.id)
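For context, the subscriber side of the Pub/Sub mechanism might look like the sketch below; the channel name and the redis-py client are assumptions, since the example does not show how job.Job publishes its progress.

import redis

r = redis.Redis()
pubsub = r.pubsub()
pubsub.subscribe('job-progress')  # the channel name is an assumption
for message in pubsub.listen():
    if message['type'] == 'message':
        print('progress update:', message['data'])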
Example #17
 def run_command(self, info, args, title=None):
     """Run lilypond from info with the args list, and a job title."""
     j = self.job = job.Job()
     j.errors = 'replace'
     j.decoder_stdout = j.decoder_stderr = codecs.getdecoder('utf-8')
     j.command = [info.abscommand() or info.command] + list(args)
     if title:
         j.setTitle(title)
     self.log.connectJob(j)
     j.start()
Example #18
 def run_lilypond(self, log_widget=None):
     """Run lilypond from info with the args list, and a job title."""
     # TODO: Use the global JobQueue
     info = self.lilypond_info
     j = self.job = job.Job([info.abscommand() or info.command] +
                            ['-dshow-available-fonts'])
     j.set_title(_("Available Fonts"))
     j.done.connect(self.process_results)
     if log_widget:
         log_widget.connectJob(j)
     j.start()
Example #19
def defaultJob(document, args=None):
    """Return a default job for the document.

    The 'args' argument, if given, must be a list of commandline arguments
    that are given to LilyPond, and may enable specific preview modes.

    If args is not given, the Job will cause LilyPond to run in Publish mode,
    with point and click turned off.

    """
    filename, includepath = documentinfo.info(document).jobinfo(True)

    i = info(document)
    j = job.Job()

    command = [i.abscommand() or i.command]
    s = QSettings()
    s.beginGroup("lilypond_settings")
    if s.value("delete_intermediate_files", True, bool):
        command.append('-ddelete-intermediate-files')
    else:
        command.append('-dno-delete-intermediate-files')

    if args:
        command.extend(args)
    else:
        # publish mode
        command.append('-dno-point-and-click')

    if s.value("default_output_target", "pdf", str) == "svg":
        # engrave to SVG
        command.append('-dbackend=svg')
    else:
        # engrave to PDF
        if not args:
            # publish mode
            if s.value("embed_source_code", False,
                       bool) and i.version() >= (2, 19, 39):
                command.append('-dembed-source-code')
        command.append('--pdf')

    command.extend('-I' + path for path in includepath)
    j.directory = os.path.dirname(filename)
    command.append(filename)
    j.command = command
    j.environment['LD_LIBRARY_PATH'] = i.libdir()
    if s.value("no_translation", False, bool):
        j.environment['LANG'] = 'C'
        j.environment['LC_ALL'] = 'C'
    j.set_title("{0} {1} [{2}]".format(os.path.basename(i.command),
                                       i.versionString(),
                                       document.documentName()))
    return j
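A hedged usage sketch: `document` is assumed to be an open document object, and the second call just illustrates passing custom args (here LilyPond's -dpoint-and-click flag) instead of the default publish mode.

j = defaultJob(document)                                 # publish mode
j_preview = defaultJob(document, ['-dpoint-and-click'])  # preview-style args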
Example #20
 def append(self, new_job):
     if not isinstance(new_job, dict) or any(
             attribute not in new_job for attribute in job.attributes):
         raise TypeError
     self.jobs.append(
         job.Job(id=new_job['id'],
                 container=new_job['container'],
                 driver=new_job['driver'],
                 site=new_job['site'],
                 worker=socket.gethostname(),
                 host=self.config['api']['host'],
                 token=self.config['api']['token']))
     return self.start_job(self.jobs[-1])
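A hedged usage sketch for append(); it assumes job.attributes names exactly the four keys read above, and all values are made up.

worker.append({  # `worker` is a hypothetical instance of this class
    'id': 42,
    'container': 'crawler:latest',
    'driver': 'docker',
    'site': 'https://example.org',
})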
Example #21
 def get_last_queue(self):
     query = "SELECT * FROM {}".format(Queue.q_table)
     self.queue_dict_id = dict()
     self.queue_dict_pid = dict()
     for db_job in self._database.read(query):
         if db_job[2] == 'NULL':
             p_id = None
         else:
             p_id = db_job[2]
         j = job.Job(db_job[0], db_job[1], p_id, db_job[3], db_job[4],
                     str(db_job[5]))
         self.queue_dict_id[int(db_job[0])] = j
         if p_id:
             self.queue_dict_pid[int(p_id)] = j
Example #22
    def add(self, payload):
        """Responsible of adding new jobs the the job queue."""
        if payload is None:
            log.error("Missing payload when trying to add job")
            return

        pr_id = github.pr_id(payload)
        pr_number = github.pr_number(payload)
        pr_sha1 = github.pr_sha1(payload)
        pr_full_name = github.pr_full_name(payload)

        with self.lock:
            log.info("Got GitHub initiated add {}/{} --> PR#{}".format(
                     pr_id, pr_sha1, pr_number))
            # Check whether the jobs in the current queue touch the same PR
            # number as this incoming request does. Iterate over a reversed
            # copy, since entries may be deleted by index while scanning.
            for i, elem in reversed(list(enumerate(self.q))):
                job_in_queue = self.job_dict[elem]
                # Remove existing jobs as long as they are not user initiated
                # jobs.
                if (job_in_queue.pr_number() == pr_number and
                        job_in_queue.pr_full_name() == pr_full_name):
                    if not job_in_queue.user_initiated:
                        log.debug("Non user initiated job found in queue, "
                                  "removing {}".format(elem))
                        del self.q[i]
                        db.update_job(job_in_queue.pr_id(),
                                      job_in_queue.pr_sha1(),
                                      status.d[status.CANCEL], "N/A")
                        github.update_state(job_in_queue.payload,
                                            "failure", "Job cancelled!")

            # Check whether current job also should be stopped (i.e, same
            # PR, but _not_ user initiated).
            if (self.jt is not None and
                    self.jt.job.pr_number() == pr_number and
                    self.jt.job.pr_full_name() == pr_full_name and not
                    self.jt.job.user_initiated):
                log.debug("Non user initiated job found running, "
                          "stopping {}".format(self.jt.job))
                self.jt.stop()

            pr_id_sha1 = "{}-{}".format(pr_id, pr_sha1)
            self.q.append(pr_id_sha1)
            new_job = job.Job(payload, False)
            self.job_dict[pr_id_sha1] = new_job
            db.add_build_record(new_job.payload)
            db.update_job(pr_id, pr_sha1, status.d[status.PENDING], "N/A")
            github.update_state(payload, "pending", "Job added to queue")
Example #23
    def __init__(self, token):
        super().__init__(token)
        # self.job_parameters = None
        self.num_of_vacancies = 0
        self.last_position = 0
        self.list_of_vacancies = []
        self.job_parameters = job.Job()

        self.__is_authorization = False
        self.__is_auth_login = False
        self.user_login = ""
        self.user_password = ""

        self.waiting_filter_value = ""

        self.access_token = ""
Example #24
def PrismsPriorityJob(name="STDIN",
                      nodes="1",
                      ppn="16",
                      walltime="1:00:00",
                      pmem="3800mb",
                      exetime=None,
                      message="a",
                      email=None,
                      command=None,
                      auto=False):
    """
    Returns a high-priority Job for the normal PRISMS queue.
    
    Normal PRISMS-related jobs are limited to 62 nodes (1000 cores), 
    and 48 hrs of walltime. They are given priority 0.
    """

    if int(nodes) * int(ppn) > 1000:
        print("Error in PrismsPriorityJob(). Requested more than 1000 cores.")
        sys.exit()

    if int(ppn) > 16:
        print("Error in PrismsPriorityJob(). Requested more than 16 ppn.")
        sys.exit()

    if misc.hours(walltime) > 48.0:
        print(
            "Error in PrismsPriorityJob(). Requested more than 48 hrs walltime."
        )
        sys.exit()

    j = job.Job(name=name,
                account="prismsproject_fluxoe",
                nodes=nodes,
                ppn=ppn,
                walltime=walltime,
                pmem=pmem,
                qos="flux",
                queue="fluxoe",
                exetime=exetime,
                message=message,
                email=email,
                priority="0",
                command=command,
                auto=auto)

    return j
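A short usage sketch for the factory above; the job name, resources, and command are illustrative and stay inside the documented 1000-core / 48-hour limits.

j = PrismsPriorityJob(name="relax_Al",        # hypothetical job name
                      nodes="2",              # 2 x 16 ppn = 32 cores
                      walltime="24:00:00",
                      command="mpirun vasp")  # command is an assumption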
Example #25
def main(stock='TSLA'):
    # sma(stock, True)

    j = job.Job()
    # end_time = datetime.today()
    # start_time = end_time - timeDelta  # datetime(2014, 1, 1)
    start_time, end_time = getTimeTuple()
    j.add(pdr.DataReader, stock, 'yahoo', start_time, end_time)
    p = movingAverages(stock, "simple")
    j.add(p.get_positions)
    j.add(p.dollarCostAvg)
    df = j.exec()
    print(df)
    df = p.indicators.loc[p.indicators['positions'] ** 2 == 1]
    s = back_test(j.data, df, start_time, end_time)
    print(f'{s}%')
    p.plot()
Example #26
    def new_job(self, post_name):
        manage = Manage()
        
        title = input("Enter Title: ")
        description = input("Enter Description: ")
        employer = input("Enter Emplyer: ")
        location = input("Enter Location: ")
        salary = input("Salary: ")

        p = check.Input_Value()
        # validate that the salary is a positive number
        while not p.isNumber(salary) or float(salary) <= 0:
            print("\nThe salary should be a positive number. Try again!")
            salary = input("Salary: ")

        job = j.Job(title, description, employer, location, salary, post_name)
        return manage.add_job(job, post_name)  # return the name of the user who posted the job; we could return a tuple here
Example #27
    def versionString(self):
        if not self.abscommand():
            return ""

        j = job.Job([self.abscommand(), '--version'])

        @j.done.connect
        def done():
            success = j.success
            if success:
                output = ' '.join([line[0] for line in j.history()])
                m = re.search(r"\d+\.\d+(.\d+)?", output)
                self.versionString = m.group() if m else ""
            else:
                self.versionString = ""

        app.job_queue().add_job(j, 'generic')
Example #28
 def readjobs(self, args):
     """Read jobs from LSF"""
     p = Popen(["bjobs", "-w"] + args, stdout=PIPE, stderr=PIPE)
     out, err = p.communicate()
     out = out.decode()
     err = err.decode()
     if "No unfinished job found" in err:
         return
     for line in out.split("\n")[1:-1]:
         line = line.split()
         data = {"Job": line[0], "User": line[1], "Status": line[2]}
         match = re.search("(\[\d+\])$", line[-4])
         if match:
             data["Job"] += match.groups()[0]
         if data["Job"] in Joblist.alljobs:
             self.append(Joblist.alljobs[data["Job"]])
         else:
             self.append(modulejob.Job(data))
Example #29
    def user_add(self, pr_id, pr_sha1):
        if pr_id is None or pr_sha1 is None:
            log.error("Missing pr_id or pr_sha1 when trying to submit user "
                      "job")
            return

        with self.lock:
            log.info("Got user initiated add {}/{}".format(pr_id, pr_sha1))
            payload = db.get_payload_from_pr_id(pr_id, pr_sha1)
            if payload is None:
                log.error("Didn't find payload for ID:{}".format(pr_id))
                return

            pr_id_sha1 = "{}-{}".format(pr_id, pr_sha1)
            self.q.append(pr_id_sha1)
            self.job_dict[pr_id_sha1] = job.Job(payload, True)
            db.update_job(pr_id, pr_sha1, status.d[status.PENDING], "N/A")
            github.update_state(payload, "pending", "Job added to queue")
Example #30
def PrismsDebugJob(name="STDIN",
                   nodes="1",
                   ppn="16",
                   walltime="1:00:00",
                   pmem="3800mb",
                   exetime=None,
                   message="a",
                   email=None,
                   command=None,
                   auto=False):
    """
    Returns a Job for the debug queue.

    The debug queue has 5 nodes (80 cores), and a 6 hr walltime limit.
    """

    if int(nodes) * int(ppn) > 80:
        print("Error in PrismsDebugJob(). Requested more than 80 cores.")
        sys.exit()

    if int(ppn) > 16:
        print("Error in PrismsDebugJob(). Requested more than 16 ppn.")
        sys.exit()

    if misc.hours(walltime) > 6.0:
        print("Error in PrismsDebugJob(). Requested more than 6 hrs walltime.")
        sys.exit()

    j = job.Job(name=name,
                account="prismsprojectdebug_fluxoe",
                nodes=nodes,
                ppn=ppn,
                walltime=walltime,
                pmem=pmem,
                qos="flux",
                queue="fluxoe",
                exetime=exetime,
                message=message,
                email=email,
                priority="-1000",
                command=command,
                auto=auto)

    return j