def kill(self):
    """Kill the job process."""
    logger.debug("jobprocess - kill the job")
    if self.process_id is None:
        return constant.NOT_FOUND

    success = constant.ERROR
    if platform.system() == "Windows":
        # taskkill /F forcefully terminates the process by PID
        kill_cmd = ["taskkill", "/PID", "%s" % self.process_id, "/F"]
        p = subprocess.Popen(kill_cmd, stdout=subprocess.PIPE)
        p.wait()
        if not p.returncode:
            success = constant.OK
    else:
        try:
            # SIGKILL cannot be caught or ignored by the target process
            os.kill(self.process_id, signal.SIGKILL)
            success = constant.OK
        except Exception as e:
            logger.error("jobprocess - unable to kill %s" % e)
    return success
def run(self):
    """Run server."""
    logger.debug("restapi - server started")
    try:
        # serve requests until the stop event is set
        while not self.stop_event.is_set():
            self.httpd.serve_forever()
    except Exception as e:
        logger.error("restapi - exception: " + str(e))
    logger.debug("restapi - server stopped")
def load_yamlstr(yaml_str):
    """Open and read a yaml string."""
    logger.debug('jobmodel - loading yaml from string')
    try:
        yaml_job = yaml.safe_load(yaml_str)
    except Exception as e:
        error_str = "yaml loading error - %s" % e
        logger.error('jobmodel - %s' % error_str)
        return (constant.ERROR, error_str)
    return (constant.OK, yaml_job)
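# Usage sketch for load_yamlstr (hedged: the yaml content below is purely
# illustrative, not a schema defined in this file). The (status, payload)
# tuple convention is shared by most loaders in this module:
#
#   rc, job = load_yamlstr(yaml_str="actions:\n  - name: demo\n")
#   if rc == constant.OK:
#       print(job)   # parsed python object
#   else:
#       print(job)   # error string describing the yaml failure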
def get_user(self, login):
    """Get user."""
    logger.debug("usersmanager - get user=%s" % login)

    # search the user
    u = self.search_user(login=login)
    if u is None:
        error_str = "user=%s does not exist" % login
        logger.error('usersmanager - %s' % error_str)
        return (constant.NOT_FOUND, error_str)
    return (constant.OK, [u])
def create_pyjob_runner(job_yaml, job_path, job_id, workspace, user):
    """Create the python job runner script on disk."""
    logger.debug('jobmodel - creating python job runner')

    # loading globals variables
    globals_file = '%s/%s/%s/globals.yml' % (settings.get_app_path(),
                                             settings.cfg['paths']['workspaces'],
                                             workspace)
    globals_valid, yaml_globals = load_yamlfile(yaml_file=globals_file,
                                                workspace=workspace,
                                                user=user,
                                                repo=constant.REPO_WORKSPACES)
    if globals_valid != constant.OK:
        logger.error('jobmodel - invalid globals variables')
        return (constant.ERROR, {})

    # generate the runner script: bootstrap the import path, redirect
    # stdout/stderr to the job tracer, inline the snippets, then run
    script = [
        "#!/usr/bin/python",
        "# -*- coding: utf-8 -*-",
        "",
        "import sys",
        "import os",
        "import time",
        "import json",
        "import traceback",
        "",
        "p = os.path.dirname(os.path.abspath(__file__))",
        "root_path = os.sep.join(p.split(os.sep)[:-5])",
        "sys.path.insert(0, root_path)",
        "",
        "from ea.automateactions.joblibrary import jobtracer",
        "from ea.automateactions.joblibrary import jobhandler",
        "from ea.automateactions.joblibrary import jobsnippet",
        "from ea.automateactions.joblibrary import datastore",
        "",
        "jobtracer.initialize(result_path=p)",
        "",
        "sys.stderr = jobtracer.StdWriter(mode_err=True)",
        "sys.stdout = jobtracer.StdWriter()",
        "",
        "jobhandler.initialize(globals=%s)" % yaml_globals,
        "datastore.initialize()",
        "",
        write_snippets(job_path, job_yaml, job_id, workspace, user),
        "",
        "jobhandler.instance().start()",
        "jobhandler.finalize()",
        "ret_code = jobhandler.get_retcode()",
        "sys.exit(ret_code)",
    ]

    # write the generated runner next to the job files
    with open(n("%s/jobrunner.py" % job_path), 'wb') as fd:
        fd.write('\n'.join(script).encode('utf-8'))
    return (constant.OK, "success")
def remove_event(self, event):
    """Remove an event from the queue."""
    self.mutex.acquire()
    try:
        if self.queue:
            logger.debug("scheduler - remove event")
            self.queue.remove(event)
            # restore the heap invariant after the arbitrary removal
            heapq.heapify(self.queue)
            del event
    except Exception as e:
        logger.error("scheduler - exception while "
                     "removing event %s: %s" % (event.ref, e))
    self.mutex.release()
def do_ldap_auth(self, login, password):
    """Do ldap auth."""
    logger.debug("sessionsmanager - do ldap auth for login=%s" % login)
    if ldap3 is None:
        logger.error("auth failed - ldap library missing")
        return False

    # get ldap settings
    ldap_host_list = settings.cfg['ldap']['host']
    ldap_dn_list = settings.cfg['ldap']['dn']

    # define ldap server(s)
    servers_list = []
    for host in ldap_host_list:
        use_ssl = False
        ldap_port = 389  # default ldap port (636 for ldaps)

        # parse the url to extract scheme, host and port
        url_parsed = urllib.parse.urlparse(host)
        if url_parsed.scheme == "ldaps":
            use_ssl = True
            ldap_port = 636
        if ":" in url_parsed.netloc:
            ldap_host, ldap_port = url_parsed.netloc.split(":")
        else:
            ldap_host = url_parsed.netloc

        server = ldap3.Server(ldap_host,
                              port=int(ldap_port),
                              use_ssl=use_ssl)
        servers_list.append(server)

    # try each bind dn template until one of them authenticates
    auth_success = False
    last_auth_err = ""
    for bind_dn in ldap_dn_list:
        c = ldap3.Connection(servers_list,
                             user=bind_dn % login,
                             password=password)
        # perform the bind operation
        auth_success = c.bind()
        last_auth_err = c.result
        if auth_success:
            break

    if not auth_success:
        logger.debug("sessionsmanager - %s" % last_auth_err)
    return auth_success
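# Illustrative settings shape expected by do_ldap_auth (hedged: the values
# below are examples, not defaults shipped with the project). Each host is
# a url parsed with urllib.parse, and each dn entry is a template applied
# to the login with the % operator:
#
#   settings.cfg['ldap'] = {
#       "authbind": True,
#       "host": ["ldap://ldap.example.com",
#                "ldaps://backup.example.com:10636"],
#       "dn": ["uid=%s,ou=people,dc=example,dc=com"],
#   }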
def del_result(self, job_id, user):
    """Delete result."""
    if job_id not in self.cache:
        return (constant.NOT_FOUND,
                'result id=%s does not exist' % job_id)

    try:
        path_result = "%s/%s" % (self.repo_path, job_id)
        shutil.rmtree(path_result)
    except Exception as e:
        logger.error("reporesults - rm result failed: %s" % e)
        return (constant.ERROR, 'unable to remove result folder')

    del self.cache[job_id]
    return (constant.OK, 'result folder removed')
def init_storage(self, job_id):
    """Init result storage."""
    # add result folder
    try:
        p = self.get_path(job_id=job_id)
        os.mkdir(p, 0o755)
    except Exception as e:
        logger.error("reporesults - mkdir result failed: %s" % e)
        return (constant.ERROR, 'add result folder error')

    # finally put it in the cache
    self.cache[job_id] = {}
    return (constant.OK, 'result storage initiated')
def init_cache(self):
    """Init the cache."""
    for entry in list(os.scandir("%s/" % self.repo_path)):
        if entry.is_dir(follow_symlinks=False):
            try:
                # each valid result folder must contain a settings.json
                if not os.path.exists("%s/settings.json" % entry.path):
                    continue
                with open("%s/settings.json" % entry.path, "r") as fh:
                    entry_details = fh.read()
                self.cache[entry.name] = json.loads(entry_details)
            except Exception as e:
                logger.error("reporesults - bad entry: %s" % e)
def get_entries(self, workspace):
    """Get all entries for the workspace provided."""
    logger.debug("globals - get entries")
    env_file = '%s/%s/globals.yml' % (self.workspaces_path, workspace)

    entries = ""
    try:
        with open(n(env_file)) as f:
            entries = f.read()
    except FileNotFoundError:
        # a missing globals file is logged but not fatal:
        # the caller receives an empty string
        logger.error("globals - globals file missing "
                     "for workspace=%s" % workspace)
    return (constant.OK, entries)
def update_role(self, login, role):
    """Update user role."""
    logger.debug('usersmanager - update role for user=%s' % login)

    # search the user
    u = self.search_user(login=login)
    if u is None:
        error_str = "user=%s does not exist" % login
        logger.error('usersmanager - %s' % error_str)
        return (constant.NOT_FOUND, error_str)

    # update user in cache and save changes to file
    u[login]["role"] = role
    self.save_users()
    return (constant.OK, "success")
def do_session_auth(self, login, password):
    """Do session auth."""
    logger.debug("sessionsmanager - do session auth "
                 "for login=%s" % login)
    success = False
    sess_id = None

    # search login in cache users
    u = usersmanager.search_user(login=login)
    if u is None:
        logger.error("sessionsmanager - auth failed "
                     "login=%s not found" % login)
        return (success, u, sess_id)

    # ldap auth or local session auth ?
    if settings.cfg['ldap']['authbind']:
        success = self.do_ldap_auth(login=login, password=password)
    else:
        hash_pwd = usersmanager.genpass_hash(password=password)
        if u[login]['secrets']["session"] == hash_pwd:
            success = True

    if not success:
        logger.error("sessionsmanager - auth failed "
                     "for login %s" % login)
        return (success, u, sess_id)

    # generate session id and compute the expiration date
    sess_id = self.gen_sess_id()
    last_activity = time.time()
    end = time.gmtime(last_activity + self.lease)
    expires = time.strftime("%a, %d-%b-%Y %T GMT", end)

    # save it
    self.sessions[sess_id] = {"last-activity": last_activity,
                              "login": login,
                              "expires": expires}

    # valid auth
    logger.debug("sessionsmanager - auth success "
                 "for login %s" % login)
    return (success, u, sess_id)
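# Usage sketch for do_session_auth (hedged: the cookie line below is only
# one plausible way a rest-api handler could consume the result; the login
# and password are placeholders):
#
#   success, user, sess_id = self.do_session_auth(login="admin",
#                                                 password="secret")
#   if success:
#       # the "expires" value stored above is already formatted as a
#       # cookie date, so it can be returned to the client directly
#       cookie = "session_id=%s; expires=%s; path=/" % (
#           sess_id, self.sessions[sess_id]["expires"])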
def save_entries(self, content, workspace):
    """Save entries."""
    logger.debug("globals - save entries")
    env_file = '%s/%s/globals.yml' % (self.workspaces_path, workspace)

    # reject content that is not valid yaml before writing it
    try:
        yaml.safe_load(content)
    except Exception as e:
        error_str = "invalid yaml - %s" % e
        logger.error('globals - %s' % error_str)
        return (constant.ERROR, error_str)

    with open(n(env_file), "w") as f:
        f.write(content)
    return (constant.OK, "success")
def reset_password(self, login):
    """Reset password."""
    logger.debug('usersmanager - reset password '
                 'for user=%s' % login)

    # search the user
    u = self.search_user(login=login)
    if u is None:
        error_str = "user=%s does not exist" % login
        logger.error('usersmanager - %s' % error_str)
        return (constant.NOT_FOUND, error_str)

    # reset password to empty and save changes to file
    u[login]["secrets"]["session"] = self.genpass_hash("")
    self.save_users()
    return (constant.OK, "success")
def delete_user(self, login):
    """Delete user."""
    logger.debug('usersmanager - delete user login=%s' % login)

    # search the user
    u = self.search_user(login=login)
    if u is None:
        error_str = "user=%s does not exist" % login
        logger.error('usersmanager - %s' % error_str)
        return (constant.NOT_FOUND, error_str)

    # remove from cache
    self.cache["users"].remove(u)

    # save users to file
    self.save_users()
    return (constant.OK, "success")
def update_event(self, event, timestamp):
    """Update event timestamp."""
    self.mutex.acquire()
    try:
        if self.queue:
            logger.debug("scheduler - update event")
            # remove the event from the queue
            self.queue.remove(event)
            heapq.heapify(self.queue)
            # update the timestamp of the event and push it back
            event.timestamp = timestamp
            heapq.heappush(self.queue, event)
            # wake up the scheduler loop
            self.event.set()
    except Exception as e:
        logger.error("scheduler - exception while "
                     "updating event %s: %s" % (event.ref, e))
    self.mutex.release()
def run(self):
    """Run thread loop."""
    q = self.queue
    while self.running:
        # block until the event is set or the timeout occurs
        self.event.wait(self.expire)
        if self.running:
            self.mutex.acquire()
            if q:
                # time to run the next event ?
                if (time.time() - q[0].timestamp) < 0:
                    # too early, update the next wake up
                    self.expire = q[0].timestamp - time.time()
                    self.event.clear()
                else:
                    logger.debug("scheduler - running event %s" % q[0].ref)
                    try:
                        # run the callback in its own thread so a slow
                        # event does not block the scheduler loop
                        t = threading.Thread(target=q[0].callback,
                                             args=q[0].args,
                                             kwargs=q[0].kwargs)
                        t.start()
                    except Exception as e:
                        logger.error("scheduler - exception while "
                                     "executing event %s: %s" % (q[0].ref, e))
                    # remove the event from the queue
                    heapq.heappop(q)
                    # more events pending ?
                    if q:
                        # update the next wake up
                        self.expire = q[0].timestamp - time.time()
                        self.event.clear()
                    else:
                        # no more events, go to sleep
                        self.expire = None
                        self.event.clear()
            self.mutex.release()
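# heapq keeps the earliest event at q[0], which assumes SchedulerEvent
# instances are ordered by timestamp. A minimal sketch of that contract
# (hedged: the real SchedulerEvent class is defined elsewhere in the
# project and may carry more fields):
#
#   class SchedulerEvent:
#       def __init__(self, ref, callback, timestamp, args, kwargs):
#           self.ref, self.callback = ref, callback
#           self.timestamp = timestamp
#           self.args, self.kwargs = args, kwargs
#
#       def __lt__(self, other):
#           return self.timestamp < other.timestamp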
def do_basic_auth(self, auth):
    """Do basic auth."""
    logger.debug("sessionsmanager - do basic auth")
    u = None
    success = False
    if not auth.startswith("Basic "):
        return (success, u)

    try:
        # decode the base64 "login:secret" pair from the header value
        auth_val = auth.split("Basic ")[1].strip()
        decoded = base64.b64decode(auth_val.encode())
        decoded = decoded.decode()
        apikey_id, apikey_secret = decoded.rsplit(":", 1)
        logger.debug("sessionsmanager - basic auth "
                     "decoded for user=%s" % apikey_id)

        u = usersmanager.search_user(login=apikey_id)
        if u is not None:
            if u[apikey_id]["secrets"]["basic"] == apikey_secret:
                logger.debug("sessionsmanager - basic auth "
                             "success for user=%s" % apikey_id)
                success = True
            else:
                logger.debug("sessionsmanager - basic auth "
                             "failed for user=%s" % apikey_id)
        else:
            logger.debug("sessionsmanager - basic auth "
                         "user=%s does not exist" % apikey_id)
    except Exception as e:
        logger.error("sessionsmanager - unable to "
                     "decode basic auth: %s" % e)
    return (success, u)
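# Client-side sketch of the header this parser expects (hedged: login and
# secret are placeholders; the basic secret is the random api key
# generated by usersmanager.add_user):
#
#   import base64
#   token = base64.b64encode(b"admin:f3a9c2...").decode()
#   headers = {"Authorization": "Basic %s" % token}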
def update_password(self, login, curpass, newpass):
    """Update password."""
    logger.debug('usersmanager - update password '
                 'for user=%s' % login)

    # search the user
    u = self.search_user(login=login)
    if u is None:
        error_str = "user=%s does not exist" % login
        logger.error('usersmanager - %s' % error_str)
        return (constant.NOT_FOUND, error_str)

    # check the provided current password
    if self.genpass_hash(curpass) != u[login]["secrets"]['session']:
        logger.error('usersmanager - bad current password '
                     'for user=%s' % login)
        return (constant.FAILED, "bad current password provided")

    # update password and save changes to file
    u[login]["secrets"]["session"] = self.genpass_hash(newpass)
    self.save_users()
    return (constant.OK, "success")
def load_yamlfile(yaml_file, workspace, user, repo):
    """Load yaml file."""
    logger.debug('jobmodel - loading yaml from file')

    # resolve the file path according to the repository type
    if repo == constant.REPO_ACTIONS:
        repo_path = actionstorage.instance().get_path(workspace=workspace)
        file_path = n("%s/%s" % (repo_path, yaml_file))
    elif repo == constant.REPO_SNIPPETS:
        repo_path = snippetstorage.instance().get_path(workspace=workspace)
        file_path = n("%s/%s" % (repo_path, yaml_file))
    else:
        file_path = n(yaml_file)

    if not os.path.exists(file_path):
        error_str = "file=%s not found " % yaml_file
        error_str += "in workspace=%s" % workspace
        logger.error('jobmodel - %s' % error_str)
        return (constant.NOT_FOUND, error_str)

    with open(file_path, 'r') as fd:
        yaml_str = fd.read()
    return load_yamlstr(yaml_str=yaml_str)
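# Usage sketch (hedged: the file name and workspace are placeholders).
# Actions and snippets are resolved relative to their storage path; any
# other repo value means yaml_file must already be a full path, as with
# the globals.yml lookup in create_pyjob_runner above:
#
#   rc, job = load_yamlfile(yaml_file="deploy.yml",
#                           workspace="common",
#                           user="admin",
#                           repo=constant.REPO_ACTIONS)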
def add_user(self, login, password, role):
    """Add user."""
    logger.debug('usersmanager - add user with login=%s' % login)

    # checking if the login is unique
    u = self.search_user(login=login)
    if u is not None:
        error_str = "login (%s) already exists" % login
        logger.error('usersmanager - %s' % error_str)
        return (constant.ALREADY_EXISTS, error_str)

    # create a random secret for the basic auth api key
    key_secret = hexlify(os.urandom(20)).decode("utf8")

    # add the user: the session password is stored hashed,
    # the api key secret is stored as generated
    new_secrets = {"session": self.genpass_hash(password),
                   "basic": key_secret}
    new_user = {login: {"role": role, "secrets": new_secrets}}
    self.cache["users"].append(new_user)

    # save users to file
    self.save_users()
    return (constant.OK, "success")
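# Resulting cache entry, for reference (hedged: the role value is
# illustrative and the secret values are shortened placeholders):
#
#   {"admin": {"role": "administrator",
#              "secrets": {"session": "c0067d4a...",  # hash of the password
#                          "basic": "f3a9c2..."}}}    # api key for basic auth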
def add_event(self, ref, timestamp, callback, *args, **kwargs):
    """Add an event in the queue."""
    self.mutex.acquire()
    success = constant.OK
    new_event = None
    try:
        logger.debug("scheduler - adding event %s" % timestamp)
        self.expire = timestamp - time.time()

        # create a new event and put it in the queue
        new_event = SchedulerEvent(ref, callback, timestamp, args, kwargs)
        heapq.heappush(self.queue, new_event)

        # wake up the scheduler loop
        self.event.set()
    except Exception as e:
        logger.error("scheduler - exception while "
                     "adding event %s" % e)
        success = constant.ERROR
    self.mutex.release()
    return (success, new_event)
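# Usage sketch (hedged: the ref, callback, and the way the scheduler
# instance is obtained are illustrative, not taken from this file).
# Schedule a function roughly sixty seconds from now; the run loop above
# starts it in its own thread once the timestamp is reached:
#
#   def notify(job_id):
#       logger.info("job %s is due" % job_id)
#
#   rc, ev = self.add_event("job-42", time.time() + 60, notify, "42")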
def create_pyjob(yaml_file, yaml_str, workspace, user, job_id):
    """Create pyjob."""
    logger.debug('jobmodel - creating python job')

    # get the job path according to the id
    job_path = executionstorage.get_path(job_id=job_id)

    # checking if yaml is provided
    if yaml_file is None and yaml_str is None:
        error_str = "no yaml file or content provided"
        logger.error('jobmodel - %s' % error_str)
        return (constant.ERROR, error_str)

    # loading the yaml, the file taking precedence over the raw string
    if yaml_str is not None:
        yaml_valid, yaml_job = load_yamlstr(yaml_str=yaml_str)
    if yaml_file is not None:
        yaml_valid, yaml_job = load_yamlfile(yaml_file=yaml_file,
                                             workspace=workspace,
                                             user=user,
                                             repo=constant.REPO_ACTIONS)
    if yaml_valid != constant.OK:
        return (constant.ERROR, yaml_job)

    # create the python runner script
    success, details = create_pyjob_runner(job_yaml=yaml_job,
                                           job_path=job_path,
                                           job_id=job_id,
                                           workspace=workspace,
                                           user=user)
    if success != constant.OK:
        return (constant.ERROR, details)
    return (constant.OK, "success")
def run(self):
    """Run thread."""
    logger.debug("jobprocess - run the job")

    # prepare the next run if the job is recursive
    if self.is_recursive():
        # delete from disk the current backuped job
        self.delete()
        # register a new job with the same parameters
        new_start_time = self.get_next_start_time()
        self.job_mngr.schedule_job(user=self.user,
                                   job_descr=self.job_descr,
                                   job_file=self.job_file,
                                   workspace=self.workspace,
                                   sched_mode=self.sched_mode,
                                   sched_at=self.sched_at,
                                   sched_timestamp=new_start_time)

    # keep the start time of the run
    start_time = time.time()

    # change state to running
    self.set_state(state=constant.STATE_RUNNING)

    # get the python path according to the os
    if platform.system() == "Windows":
        executable = settings.cfg['paths']['python-windows']
    else:
        executable = settings.cfg['paths']['python-linux']

    # prepare the path of the job runner
    result_path = executionstorage.get_path(job_id=self.job_id)
    job_path = n("%s/jobrunner.py" % result_path)

    jobtracer.initialize(result_path=n(result_path))

    args = [executable, job_path]

    # run the job in a separate process
    jobtracer.instance().log_job_started()
    try:
        p = subprocess.Popen(args,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        self.process_id = p.pid
    except Exception as e:
        logger.error('jobprocess - unable to run job: %s' % e)
        self.set_state(state=constant.STATE_FAILURE)
    else:
        # wait for the process to complete
        p.wait()
        retcode = p.returncode

        # compute the duration of the job
        self.job_duration = time.time() - start_time

        # set the final state of the job: SUCCESS or FAILURE ?
        if retcode == 0:
            self.set_state(state=constant.STATE_SUCCESS)
        else:
            err_str = p.stderr.read().decode("utf8")
            if len(err_str):
                jobtracer.instance().log_job_error(message=err_str)
            self.set_state(state=constant.STATE_FAILURE)

        job_result = constant.RETCODE_LIST.get(retcode,
                                               constant.STATE_FAILURE)
        jobtracer.instance().log_job_stopped(result=job_result,
                                             duration=self.job_duration)

    jobtracer.finalize()
    logger.info('jobprocess - job %s terminated' % self.job_id)
def start(self):
    """Start the server."""
    if self.is_running():
        logger.error("coreserver - server is already running")
        sys.exit(1)

    # run the server as a daemon, linux only
    if platform.system() == "Linux":
        self.daemonize()

    logger.info("coreserver - starting up server...")
    try:
        cliserver.initialize(coreserver=self)
        logger.info("coreserver - cli [OK]")

        workspacesmanager.initialize(workspaces_path=n(path_workspaces))
        logger.info("coreserver - workspaces manager [OK]")

        usersmanager.initialize()
        logger.info("coreserver - users manager [OK]")

        globalsmanager.initialize(workspaces_path=n(path_workspaces))
        logger.info("coreserver - globals manager [OK]")

        sessionsmanager.initialize()
        logger.info("coreserver - sessions manager [OK]")

        scheduler.initialize()
        logger.info("coreserver - scheduler [OK]")

        jobsmanager.initialize(path_bckps=n(path_backups))
        logger.info("coreserver - jobs manager [OK]")

        executionstorage.initialize(repo_path=n(path_results))
        logger.info("coreserver - executions storage [OK]")

        actionstorage.initialize(repo_path=n(path_actions))
        logger.info("coreserver - actions storage [OK]")

        snippetstorage.initialize(repo_path=n(path_snippets))
        logger.info("coreserver - snippets storage [OK]")

        bind_ip = settings.cfg['network']['api-bind-ip']
        bind_port = settings.cfg['network']['api-bind-port']
        restapi.initialize(bind_addr=(bind_ip, bind_port))
        restapi.start()
        logger.info("coreserver - rest api server [OK]")

        jobsmanager.reload_jobs()
        logger.info("coreserver - jobs reloaded [OK]")
    except Exception:
        tb = traceback.format_exc()
        logger.error("coreserver - unable to start server: %s" % tb)
        self.cleanup()
        sys.exit(3)

    msg_success = "server successfully started!"
    logger.info(msg_success)
    if platform.system() == "Windows":
        print(msg_success)

    self.run()