import ast
import os
from datetime import datetime


def submit_job(file_bytes, config):
    """
    Upload a job script and register the job.
    :param file_bytes: contents of the job script file
    :param config: job configuration, serialized as a dict literal string
    :return: the job_id of the stored job
    """
    try:
        # ast.literal_eval is the safe equivalent of eval for literal configs
        config = ast.literal_eval(config)
        job = Job()
        job.name = config['name']
        job.job_id = config['job_id']
        job.cron = config['cron']
        job.type = config.get('type', 1)
        job.instance_cnt = config.get('instance_cnt', 1)
    except Exception:
        raise ServiceException(ErrorCode.PARAM_ERROR.value,
                               'Invalid configuration')
    check_exist = session.query(Job).filter(
        Job.job_id == job.job_id).first()
    if check_exist is not None:
        raise ServiceException(ErrorCode.NOT_FOUND.value, 'Duplicate job_id')
    # Store the script alongside the other job scripts, named after the job_id
    file_name = os.path.join(
        os.path.dirname(os.path.abspath(__file__)),
        '..', 'job', job.job_id + '.py')
    with open(file_name, 'wt') as f:
        f.write(file_bytes)
    job.create_time = datetime.now()
    session.add(job)
    session.commit()
    return job.job_id
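# Illustrative only: a config payload in the shape submit_job parses above.
# The key names mirror the fields read from `config`; the values are made-up
# placeholders, not taken from the source.
example_config = (
    "{'name': 'daily-report', 'job_id': 'report_001',"
    " 'cron': '0 2 * * *', 'type': 1, 'instance_cnt': 1}"
)
# submit_job(script_source, example_config) would store the script as
# job/report_001.py and return 'report_001'.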
@classmethod
def build_job_from_json(cls, job_id, job_batch_num, job_content):
    # Build a Job and its Task graph from a JSON-derived dict of tasks
    job = Job(job_id, job_batch_num, f"job-{job_id}-{job_batch_num}")
    for v in job_content.values():
        prev_task_ids = v.pop("prev_task_ids")
        v["job_id"] = job_id
        v["job_batch_num"] = job_batch_num
        t = Task(**v)
        t.replace_vars(job.global_vars())
        # Wire up dependency edges before attaching the task to the job
        if prev_task_ids:
            for i in prev_task_ids:
                t.add_prev_id(i)
        job.add_task(task=t)
    return job
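# Illustrative only: the `job_content` shape build_job_from_json consumes
# above. Each value holds the Task constructor kwargs plus a `prev_task_ids`
# list naming upstream tasks; the field names inside each task dict are
# assumptions, since the Task signature is not shown here.
example_job_content = {
    "t1": {"task_id": "t1", "prev_task_ids": []},
    "t2": {"task_id": "t2", "prev_task_ids": ["t1"]},  # t2 runs after t1
}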
def run():
    # Regenerate the README job table from the current job list
    job_models = [Job().dump(j) for j in get_jobs()]
    latest_jobs = exclude_outdated(job_models)
    table = create_table(latest_jobs)
    readme = generate_readme(table)
    write_readme(readme)
def jobs(self):
    arguments = {'count': 2000, 'result_set_id': self.id}
    raw_results = self.client.execute_request(rest_resource='jobs',
                                              get_arguments=arguments)
    return [
        Job.from_json(self.client, raw_result)
        for raw_result in raw_results
    ]
def add_job(self, title, description=None, job=None):
    # `job`, when given, is used as the primary key of the new record
    if title is None:
        logging.warning('Title field is mandatory to add a job')
        return False
    job = Job(id=job, title=title, description=description)
    with self.db.new_session() as session:
        session.add(job)
    return True
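# Usage sketch; `tracker` is a hypothetical instance of the owning class.
# tracker.add_job('Data Engineer', description='Builds data pipelines', job=101)
# tracker.add_job(None)  # logs a warning and returns False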
import logging
import sys


def insert_tasks(PL, task_file):
    pipeline = Pipeline(PL.name, PL.log_dir)
    logging.debug("Pipeline is: {}".format(pipeline))
    task_list = PL.prepare_managed_tasks()
    logging.debug("Task list is: {}".format([x['name'] for x in task_list]))
    # We need to be able to translate the dependencies as stored in the task
    # list (a list of other task names that a particular task depends on)
    # into a list of Job object references that have already been added to
    # the session. We build up a dictionary of task['name'] : Job as we
    # insert them.
    deps_to_job = {}
    print("  Inserting tasks into {}".format(task_file))
    logging.info("Inserting tasks into {}".format(task_file))
    try:
        for task in task_list:
            print("    -> {}".format(task['name']))
            try:
                dependencies = [deps_to_job[d] for d in task['dependencies']]
            except KeyError as e:
                logging.exception("Key error processing dependencies")
                msg = "Task {} depends on a task that hasn't been " \
                      "processed ({}). Check your Pipeline XML".format(
                          task['name'], e.args[0])
                raise Exception(msg)
            job = Job(pipeline, task['name'], task['threads'],
                      task['stdout_path'], task['stderr_path'],
                      task['script_path'], task['epilogue_path'],
                      task['mem'], task['email_list'], task['mail_options'],
                      task['batch_env'], dependencies, task['queue'],
                      task['walltime'])
            deps_to_job[task['name']] = job
            logging.debug("Adding job {} (log dir: {}) to session".format(
                job.job_name, job.pipeline.log_directory))
            Session.add(job)
    except Exception as e:
        logging.exception("Error inserting tasks into database")
        print("Error inserting tasks into database: {}".format(e),
              file=sys.stderr)
        sys.exit(6)
    # Only commit the session if we were able to add all the jobs to the
    # session without catching an exception.
    Session.commit()
    logging.info("  {} tasks have been inserted into task file {} "
                 "(log dir: {})".format(len(task_list), task_file,
                                        PL.log_dir))
    return len(task_list)
def add_job(job):
    print(job)
    # Insert any skills that are not already in the database
    skill_list = job["skill"]
    if skill_list:
        for skill in skill_list:
            s = db_session.query(Skill).filter(
                Skill.id == skill["code"]).first()
            print(s)
            if s is None:
                db_session.add(
                    Skill(id=skill["code"],
                          name=skill["description"],
                          modify_time=myutils.get_datetime_str(),
                          create_user=db_user))
                db_session.commit()
            else:
                print(skill, "already exists")
    try:
        # Insert the job itself
        db_session.add(
            Job(id=job["id"],
                name=job["job_name"],
                company_name=job["company_name"],
                company_url=job["company_url"],
                url=job["url"],
                job_detail=job["job_detail"][:1000],
                modify_time=myutils.get_datetime_str(),
                create_user=db_user))
        db_session.commit()
        # print("after insert Job", db_session.query(Job).filter(Job.id == job["id"]).first())
        # Insert the contact attached to the job
        contact = job["contact"]
        db_session.add(
            Contact(name=contact["hrName"],
                    email=contact["email"],
                    visit=contact["visit"],
                    reply=contact["reply"],
                    phone=contact["phone"],
                    other=contact["other"],
                    jobid=job["id"],
                    modify_time=myutils.get_datetime_str(),
                    create_user=db_user))
        db_session.commit()
    except Exception as e:
        print(e)
        print("---------- bad record ----------")
        print(job)
        print("--------------------------------")
    finally:
        db_session.close()
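# Illustrative only: an input record in the shape add_job reads above. The
# key set mirrors the lookups in the function; every value is a placeholder.
example_job = {
    "id": "J-1001",
    "job_name": "Backend Engineer",
    "company_name": "Acme",
    "company_url": "https://example.com",
    "url": "https://example.com/jobs/J-1001",
    "job_detail": "...",
    "skill": [{"code": "python", "description": "Python"}],
    "contact": {"hrName": "Lee", "email": "hr@example.com", "visit": "",
                "reply": "", "phone": "", "other": ""},
}
# add_job(example_job)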
def dowork(self):
    # Poll the coordinator for work, sleeping between empty responses; a
    # plain loop avoids hitting the recursion limit during long idle periods
    while True:
        head, work_result = self.connectpool.getConnect(
            'http://127.0.0.1:80/nmaptool/getwork', 'GET', '')
        jobs = json.loads(work_result)
        if jobs['result'] == '1':
            workarray = Job.Converttojobs(jobs['jobs'])
            self.maintask.add_work(workarray)
            print('get job')
            self.has_work_left()
            return
        print('no job')
        time.sleep(5)
def fetch_job_by_id(self, job_id, job_batch_num):
    # Jobs are stored as Redis hashes keyed by job id and batch number
    value = self.conn.hgetall(f"jobmanager.job-{job_id}-{job_batch_num}")
    if not value:
        return None
    job = Job(job_id=job_id, job_batch_num=job_batch_num)
    # hgetall returns bytes keys/values, so decode before use
    job.job_name = value.get(b"job_name").decode("utf8")
    job.status = int(value.get(b"status").decode("utf8"))
    for task in self.fetch_task_by_job_id(job_id, job_batch_num):
        job.add_task(task)
    return job
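# Usage sketch; assumes `store` is an instance of the owning class with
# self.conn pointing at a live Redis connection.
# job = store.fetch_job_by_id("42", "20240101")
# if job is not None:
#     print(job.job_name, job.status)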
# temp = WorkFactory()
# temp.dowork()
# test

# Build a single test job and hand it to the sniffer task
jobs = []
jobname = 'jobname'
jobaddress = '127.0.0.1'
priority = ''
starttime = '2017-09-09'
username = '******'
jobport = ''
jobstatus = '1'
jobid = '123123123'
result = ''
endtime = ''
createtime = ''
argument = ''
forcesearch = ''
isjob = ''
job = Job(jobname, jobaddress, priority, starttime, username, jobport,
          jobstatus, jobid, result, endtime, createtime, argument,
          forcesearch, isjob)
jobs.append(job)

maintask = sniffertask.snifferTask()
maintask.add_work(jobs)
while True:
    # Keep the main thread alive while the sniffer task runs
    pass
def jobs(self): arguments = {"count": 2000, "result_set_id": self.id} raw_results = self.client.execute_request(rest_resource="jobs", get_arguments=arguments) return [Job.from_json(self.client, raw_result) for raw_result in raw_results]