def write_snippet(snippet_id, snippet_name, snippet_src, snippet_descr,
                  snippet_when, job_path, job_id, user):
    """Write the python wrapper script for one snippet.

    Generates ``<job_path>/snippet<id>.py`` containing a ``run_snippet``
    function that times the snippet, runs the user code (written
    separately by write_snippet_code) and reports success/failure to
    the snippet handler.

    snippet_name, snippet_when, job_id and user are currently unused
    but kept for interface compatibility with callers.
    """
    logger.debug('jobmodel - creating python snippet')

    script = []
    script.append("#!/usr/bin/python")
    script.append("# -*- coding: utf-8 -*-")
    script.append("")
    script.append("def run_snippet(snippet):")
    script.append(tab("import time"))
    script.append(tab("import traceback"))
    script.append(tab("from ea.automateactions.joblibrary import jobsnippet"))
    script.append(tab("step_start_time = time.time()"))
    # %r emits a valid python literal, so quotes/newlines embedded in
    # the description can no longer break the generated source
    # (the previous '"%s"' interpolation did).
    script.append(tab('snippet.begin(description=%r)' % snippet_descr))
    script.append(tab("try:"))

    script.append(tab(write_snippet_import(snippet_id=snippet_id), nb_tab=2))
    script.append(tab("snippet.done()", nb_tab=2))
    script.append(tab("except jobsnippet.FailureException as e:"))
    script.append(tab("snippet.error(message=e)", nb_tab=2))
    script.append(tab("except Exception as e:"))
    script.append(tab("tb = traceback.format_exc()", nb_tab=2))
    script.append(tab("snippet.error(message=tb)", nb_tab=2))
    script.append(tab("step_duration = time.time() - step_start_time"))
    script.append(tab("snippet.ending(duration=step_duration)"))

    with open(n("%s/snippet%s.py" % (job_path, snippet_id)), 'wb') as fd:
        fd.write('\n'.join(script).encode('utf-8'))

    write_snippet_code(snippet_id=snippet_id,
                       snippet_src=snippet_src,
                       job_path=job_path)
    def kill(self):
        """Forcefully terminate the job's OS process.

        Returns constant.NOT_FOUND when no process id is attached,
        constant.OK on a successful kill, constant.ERROR otherwise.
        """
        logger.debug("jobprocess - kill the job")

        if self.process_id is None:
            return constant.NOT_FOUND

        if platform.system() == "Windows":
            # taskkill /F forces termination of the given PID
            cmd = ["taskkill", "/PID", "%s" % self.process_id, "/F"]
            proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
            proc.wait()
            if not proc.returncode:
                return constant.OK
            return constant.ERROR

        try:
            os.kill(self.process_id, signal.SIGKILL)
        except Exception as e:
            logger.error("jobprocess - unable to kill %s" % e)
            return constant.ERROR
        return constant.OK
# --- scrape artifact removed ("Esempio n. 3" example separator) ---
    def add_workspace(self, name):
        """Create a new workspace.

        Creates ``<workspaces_path>/<name>`` with its ``snippets/`` and
        ``actions/`` sub folders, registers the name in the cache and
        persists the cache to disk.

        Returns (constant.OK, "success") on success,
        (constant.ERROR, ...) when the folder already exists on disk,
        (constant.ALREADY_EXISTS, ...) when the name is already cached.
        """
        logger.debug("workspacesmanager - add workspace name=%s" % name)

        wrk_path = "%s/%s" % (self.workspaces_path, name)
        if os.path.exists(n(wrk_path)):
            return (constant.ERROR, "workspace name must be unique")

        # checking in cache if the name is free
        if self.search_workspace(name=name):
            return (constant.ALREADY_EXISTS,
                    "workspace name must be unique in cache")

        # create main workspace folder; the EAFP guard closes the race
        # window between the exists() check above and this mkdir
        try:
            os.mkdir(n(wrk_path))
        except FileExistsError:
            return (constant.ERROR, "workspace name must be unique")

        # create sub folders
        snippets_path = "%s/snippets/" % wrk_path
        actions_path = "%s/actions/" % wrk_path
        os.mkdir(n(snippets_path))
        os.mkdir(n(actions_path))

        # add workspaces in the cache and save it
        self.cache["workspaces"].append(name)

        # save cache to file
        self.save_workspaces()

        return (constant.OK, "success")
 def stop(self):
     """Signal the scheduler loop to terminate."""
     # the mutex is used as a context manager: same acquire/release
     # pair as before, but release is guaranteed on any exit path
     with self.mutex:
         logger.debug("scheduler - stopping scheduler")
         self.running = False
         self.event.set()
 def get_session(self, sess_id):
     """Return the session for sess_id, or None when unknown."""
     logger.debug("sessionsmanager - get session id=%s" % sess_id)

     # dict.get returns None for a missing key, matching the old
     # explicit membership test
     return self.sessions.get(sess_id)
    def set_state(self, state):
        """Set the job state and persist the updated status.

        Updates the in-memory state, then writes the full job status
        (self.to_dict()) to the execution storage under this job id.
        """
        logger.debug("jobprocess - state update %s" % state)

        self.job_state = state
        executionstorage.update_status(job_id=self.job_id,
                                       status=self.to_dict())
# --- scrape artifact removed ("Esempio n. 7" example separator) ---
 def get_job(self, job_id):
     """Return the job whose id matches job_id, or None."""
     logger.debug("jobsmanager - get job (id=%s)" % job_id)

     # first match wins, exactly like the original linear scan
     return next((j for j in self.jobs if j.job_id == job_id), None)
    def gensalt(self):
        """Lazily generate the global password salt on first run.

        When no salt is configured yet, stores a random 40-hex-char
        value in the settings, persists them, and re-hashes every
        user's session secret against the new salt.
        """
        if settings.cfg['security']["salt"] is None:
            logger.debug("sessionsmanager - generate salt")
            # 20 random bytes -> 40 hex characters
            settings.cfg['security']["salt"] = hexlify(os.urandom(20)).decode("utf8")
            settings.save()

            # generate hash password for all users
            self.genpass_session()
def write_snippet_code(snippet_id, snippet_src, job_path):
    """Write the user snippet source into its own module file.

    Produces ``<job_path>/snippet<id>_code.py`` exposing
    ``run_snippet_code(snippet)`` whose body is the (indented)
    user-provided source.
    """
    logger.debug('jobmodel - inject python source')

    lines = [
        "def run_snippet_code(snippet):",
        tab(snippet_src),
    ]
    target = n("%s/snippet%s_code.py" % (job_path, snippet_id))
    with open(target, 'wb') as fd:
        fd.write('\n'.join(lines).encode('utf-8'))
 def run(self):
     """Serve HTTP requests until the stop event is set.

     serve_forever() only returns once shutdown() is called elsewhere;
     the loop then re-enters it unless the stop event was set in the
     meantime.
     """
     logger.debug("restapi - server started")
     try:
         # is_set() replaces isSet(), the camelCase alias deprecated
         # since Python 3.10
         while not self.stop_event.is_set():
             self.httpd.serve_forever()
     except Exception as e:
         logger.error("restapi - exception: " + str(e))
     logger.debug("restapi - server stopped")
    def cancel(self):
        """Cancel the job and discard its results.

        Resets the job's execution storage, then removes the on-disk
        backup (delete() is a no-op for non-recursive jobs).
        """
        logger.debug("jobprocess - cancel the job")

        # remove the reset storage
        executionstorage.reset_storage(job_id=self.job_id)

        # delete job from disk for recursive one
        self.delete()

        return (constant.OK, "success")
    def get_user(self, login):
        """Look up a single user by login.

        Returns (constant.OK, [user]) when found, otherwise
        (constant.NOT_FOUND, error message).
        """
        logger.debug("usersmanager - get user=%s" % login)

        user = self.search_user(login=login)
        if user is not None:
            return (constant.OK, [ user ])

        error_str = "user=%s does not exist" % login
        logger.error('usersmanager - %s' % error_str)
        return (constant.NOT_FOUND, error_str)
    def genpass_session(self):
        """Re-hash every user's session secret and persist the cache.

        Intended to run once the salt has been generated (see
        gensalt): each stored session secret is replaced in place by
        its genpass_hash() value, then the user file is saved.
        """

        for u in self.cache["users"]:
            # { "login": { "role": ..., "secrets": {...} } }
            login = list(u.keys())[0]

            logger.debug("sessionsmanager - generate password session for user=%s" % login)
            # replace the plain stored secret with its hashed form
            u[login]["secrets"]["session"] = self.genpass_hash(password=u[login]["secrets"]["session"])

        # save changes to file
        self.save_users()
 def search_user(self, login):
     """Return the cached user entry for login, or None."""
     logger.debug("usersmanager - search in "
                  "cache login=%s" % login)

     # each cache entry is a one-key dict: { login: profile }
     matches = (entry for entry in self.cache["users"]
                if list(entry.items())[0][0] == login)
     return next(matches, None)
def create_pyjob_runner(job_yaml, job_path, job_id, workspace, user):
    """Write the top-level python job runner script to disk.

    Loads the workspace's globals.yml, then generates
    ``<job_path>/jobrunner.py``: a script that sets up sys.path,
    tracing, the job handler and the datastore, runs the generated
    snippet calls (write_snippets) and exits with the handler's
    return code.

    Returns (constant.OK, "success") or (constant.ERROR, {}) when
    the globals file is invalid.
    """
    logger.debug('jobmodel - creating python job runner')

    # loading globals variables
    globals_file = '%s/%s/%s/globals.yml' % (settings.get_app_path(
    ), settings.cfg['paths']['workspaces'], workspace)
    globals_valid, yaml_globals = load_yamlfile(yaml_file=globals_file,
                                                workspace=workspace,
                                                user=user,
                                                repo=constant.REPO_WORKSPACES)
    if globals_valid != constant.OK:
        logger.error('jobmodel - invalid globals variables')
        return (constant.ERROR, {})

    script = []
    script.append("#!/usr/bin/python")
    script.append("# -*- coding: utf-8 -*-")
    script.append("")
    script.append("import sys")
    script.append("import os")
    script.append("import time")
    script.append("import json")
    script.append("import traceback")
    script.append("")
    # the runner lives 5 levels below the project root; make the root
    # importable before pulling in the ea.* packages
    script.append("p = os.path.dirname(os.path.abspath(__file__))")
    script.append("root_path = os.sep.join(p.split(os.sep)[:-5])")
    script.append("sys.path.insert(0, root_path)")
    script.append("")
    script.append("from ea.automateactions.joblibrary import jobtracer")
    script.append("from ea.automateactions.joblibrary import jobhandler")
    script.append("from ea.automateactions.joblibrary import jobsnippet")
    script.append("from ea.automateactions.joblibrary import datastore")
    script.append("")
    script.append("jobtracer.initialize(result_path=p)")
    script.append("")
    # redirect stdout/stderr into the job tracer
    script.append("sys.stderr = jobtracer.StdWriter(mode_err=True)")
    script.append("sys.stdout = jobtracer.StdWriter()")
    script.append("")
    # yaml_globals is interpolated as a python literal into the script
    script.append("jobhandler.initialize(globals=%s)" % yaml_globals)
    script.append("datastore.initialize()")
    script.append("")
    script.append(write_snippets(job_path, job_yaml, job_id, workspace, user))
    script.append("")
    script.append("jobhandler.instance().start()")
    script.append("jobhandler.finalize()")
    script.append("ret_code = jobhandler.get_retcode()")
    script.append("sys.exit(ret_code)")

    with open(n("%s/jobrunner.py" % job_path), 'wb') as fd:
        fd.write('\n'.join(script).encode('utf-8'))

    return (constant.OK, "success")
def load_yamlstr(yaml_str):
    """Parse a YAML document from a string.

    Returns (constant.OK, parsed object) on success, otherwise
    (constant.ERROR, error message).
    """
    logger.debug('jobmodel - loading yaml from string')

    try:
        parsed = yaml.safe_load(yaml_str)
    except Exception as e:
        error_str = "yaml loading error - %s" % e
        logger.error('jobmodel - %s' % error_str)
        return (constant.ERROR, error_str)
    else:
        return (constant.OK, parsed)
 def remove_event(self, event):
     """Remove an event from the scheduler queue.

     Best-effort: a missing event (or any other failure) is logged,
     never raised. The heap invariant is restored after removal.
     """
     self.mutex.acquire()
     try:
         if self.queue:
             logger.debug("scheduler - remove event")
             self.queue.remove(event)
             # list.remove breaks the heap ordering; rebuild it
             heapq.heapify(self.queue)
     except Exception as e:
         logger.error("scheduler - exception while "
                      "removing event %s: %s" % (event.ref, e) )
     finally:
         # finally guarantees the mutex is released even if the
         # logging in the except handler itself raises
         self.mutex.release()
def write_snippet_import(snippet_id):
    """Return the generated python code that imports and runs a snippet.

    The emitted try/except rewrites SyntaxError line numbers so they
    point into the user's original source (one line above the wrapper).
    """
    logger.debug('jobmodel - write python snippet import')

    lines = [
        "try:",
        tab("from snippet%s_code import run_snippet_code" % snippet_id),
        tab("run_snippet_code(snippet=snippet)"),
        "except SyntaxError as err:",
        tab("err.lineno = err.lineno - 1"),
        tab("raise"),
    ]
    return "\n".join(lines)
# --- scrape artifact removed ("Esempio n. 19" example separator) ---
    def schedule_job(self, user, job_descr=None,
                           job_file=None, workspace="common",
                           sched_mode=0, sched_at=(0, 0, 0, 0, 0, 0),
                           sched_timestamp=0):
        """Create, build, save and schedule a job.

        Pipeline: construct the Job, init its result storage, build
        the python runner, compute/assign the start timestamp, persist
        the job, then register it with the scheduler. Any failing step
        short-circuits with (constant.ERROR, details).

        Returns (constant.OK, job_id) on success.
        sched_at is a (y, m, d, h, mn, s) tuple; an explicit
        sched_timestamp > 0 overrides the computed start time.
        """
        logger.debug("jobsmanager - schedule job")

        # create the job
        job = jobprocess.Job(job_mngr=self,
                             job_descr=job_descr,
                             job_file=job_file,
                             workspace=workspace,
                             sched_mode=sched_mode,
                             sched_at=sched_at,
                             user=user,
                             path_backups=self.path_bckps)

        # prepare the job
        success, details = job.init()
        if success != constant.OK:
            return (constant.ERROR, details)

        success, details = job.build()
        if success != constant.OK:
            return (constant.ERROR, details)

        # init start time of the job
        if sched_timestamp > 0:
            job.sched_timestamp = sched_timestamp
        else:
            job.init_start_time()

        # save the job on the disk
        success, details = job.save()
        if success != constant.OK:
            return (constant.ERROR, details)

        # Register the job on the scheduler
        logger.info("jobsmanager - adding job %s in scheduler" % job.job_id)
        success, details = scheduler.add_event(ref=job.job_id,
                                               timestamp=job.sched_timestamp,
                                               callback=self.execute_job,
                                               job=job)
        if success != constant.OK:
            return (constant.ERROR, "scheduler error")

        # on success, `details` is the scheduler event handle
        job.set_event(event=details)
        self.jobs.append(job)

        return (constant.OK, job.job_id)
    def delete(self):
        """Remove the job's on-disk backup (recursive jobs only)."""
        logger.debug("jobprocess - delete the job from disk")

        if self.is_recursive():
            backup_file = os.path.normpath(
                "%s/%s.json" % (self.path_backups, self.job_id))
            try:
                os.remove(backup_file)
            except Exception:
                # best-effort: the backup may already be gone
                pass

        return (constant.OK, "success")
    def save(self):
        """Persist the job as JSON on disk (recursive jobs only).

        The saved dict is to_dict() augmented with the job file and
        description so the job can be rebuilt after a restart.
        """
        logger.debug("jobprocess - save the job on disk")

        if self.is_recursive():
            backup_file = os.path.normpath(
                "%s/%s.json" % (self.path_backups, self.job_id))
            job_dict = self.to_dict()
            job_dict["job-file"] = self.job_file
            job_dict["job-descr"] = self.job_descr
            with open(backup_file, "w") as fh:
                fh.write("%s" % json.dumps(job_dict))

        return (constant.OK, "success")
    def build(self):
        """Generate the python runner for this job via jobmodel."""
        logger.debug("jobprocess - build python job")

        status, details = jobmodel.create_pyjob(yaml_file=self.job_file,
                                                yaml_str=self.job_descr,
                                                workspace=self.workspace,
                                                user=self.user,
                                                job_id=self.job_id)
        if status == constant.OK:
            # all is OK
            return (constant.OK, "success")
        return (constant.ERROR, details)
    def get_entries(self, workspace):
        """Return the raw globals.yml content for a workspace.

        A missing file is logged and yields an empty string; the
        status is constant.OK either way.
        """
        logger.debug("globals - get entries")

        env_file = '%s/%s/globals.yml' % (self.workspaces_path, workspace)

        try:
            with open( n(env_file) ) as fh:
                return (constant.OK, fh.read())
        except FileNotFoundError:
            logger.error("globals - globals file missing for workspace=%s" % workspace)
            return (constant.OK, "")
# --- scrape artifact removed ("Esempio n. 24" example separator) ---
    def tree_init(self):
        """Create missing workspace folders for every cached workspace.

        Sub folders (snippets/, actions/) are only created alongside a
        freshly created main folder, as before.
        """
        logger.debug("workspacesmanager - creating folders if missing ")

        for workspace in self.cache["workspaces"]:
            base = "%s/%s" % (self.workspaces_path, workspace)
            if os.path.exists(n(base)):
                continue
            os.mkdir(n(base))
            os.mkdir(n("%s/snippets/" % base))
            os.mkdir(n("%s/actions/" % base))
    def do_ldap_auth(self, login, password):
        """Authenticate a login/password pair against LDAP.

        Builds one ldap3.Server per configured host (settings
        ldap.host), then tries a simple bind with each configured DN
        template (settings ldap.dn) until one succeeds.

        Returns True on a successful bind, False otherwise (including
        when the ldap3 library is not installed).
        """
        logger.debug("sessionsmanager - do ldap auth for login=%s" % login)

        if ldap3 is None:
            logger.error("auth failed - ldap library missing")
            return False

        # get ldap settings
        ldap_host_list = settings.cfg['ldap']['host']
        ldap_dn_list = settings.cfg['ldap']['dn']

        # define ldap server(s)
        servers_list = []
        for host in ldap_host_list:
            use_ssl = False
            # 389 is the standard LDAP port (the previous value, 386,
            # was a typo and would fail against any default server)
            ldap_port = 389
            # parse the url to extract scheme host and port
            url_parsed = urllib.parse.urlparse(host)

            if url_parsed.scheme == "ldaps":
                use_ssl = True
                ldap_port = 636

            # an explicit host:port in the url overrides the default
            if ":" in url_parsed.netloc:
                ldap_host, ldap_port = url_parsed.netloc.split(":")
            else:
                ldap_host = url_parsed.netloc

            server = ldap3.Server(ldap_host,
                                  port=int(ldap_port),
                                  use_ssl=use_ssl)
            servers_list.append(server)

        # initialize so an empty dn list cannot leave auth_success
        # unbound (previously a NameError)
        auth_success = False
        last_auth_err = ""
        for bind_dn in ldap_dn_list:
            # bind_dn acts as a template the login is substituted into
            # (e.g. "uid=%s,ou=people,...") — TODO confirm format
            c = ldap3.Connection(servers_list,
                                 user=bind_dn % login,
                                 password=password)

            # perform the Bind operation
            auth_success = c.bind()
            last_auth_err = c.result
            if auth_success:
                break

        if not auth_success:
            logger.debug("sessionsmanager - %s" % last_auth_err)

        return auth_success
    def init(self):
        """Prepare the job's result storage and persist initial status."""
        logger.debug("jobprocess - init result storage")

        status, details = executionstorage.init_storage(job_id=self.job_id)
        if status != constant.OK:
            return (constant.ERROR, details)

        # save job status in result
        status, details = executionstorage.init_status(job_id=self.job_id,
                                                       status=self.to_dict())
        if status != constant.OK:
            return (constant.ERROR, details)

        return (constant.OK, "success")
    def genpass_api(self):
        """Generate a basic-auth secret for every user lacking one.

        The user file is only rewritten when at least one secret was
        generated.
        """
        dirty = False
        for entry in self.cache["users"]:
            (login, profile) = list(entry.items())[0]
            if profile["secrets"]["basic"] is not None:
                continue
            logger.debug("sessionsmanager - generate password api for user=%s" % login)
            # 20 random bytes -> 40 hex characters
            profile["secrets"]["basic"] = hexlify(os.urandom(20)).decode("utf8")
            dirty = True

        # save changes to file
        if dirty:
            self.save_users()
    def init_start_time(self):
        """Compute and store the job's start timestamp (sched_timestamp).

        The timestamp depends on self.sched_mode:
        - SCHED_NOW: current time
        - SCHED_AT: the absolute date in sched_at (y, m, d, h, mn, s)
        - SCHED_DAILY: today at h:mn:s
        - SCHED_HOURLY: this hour at mn:s
        - SCHED_EVERY_X: now + a delta of (h, mn, s)
        - SCHED_WEEKLY: next date whose weekday() equals d, at h:mn:s

        NOTE(review): if sched_mode matches none of these constants,
        `timestamp` is never assigned and the final line raises
        NameError — confirm callers always pass a valid mode.
        """
        logger.debug("jobprocess - init start time")

        y, m, d, h, mn, s = self.sched_at
        cur_dt = time.localtime()

        if self.sched_mode == constant.SCHED_NOW:
            timestamp = time.time()

        if self.sched_mode == constant.SCHED_AT:
            # absolute date taken verbatim from sched_at
            dt = datetime.datetime(y, m, d, h, mn, s, 0)
            timestamp = time.mktime(dt.timetuple())

        if self.sched_mode == constant.SCHED_DAILY:
            # today's date with the requested time of day
            next_dt = datetime.datetime(cur_dt.tm_year, cur_dt.tm_mon,
                                        cur_dt.tm_mday, h, mn, s, 0)
            timestamp = time.mktime(next_dt.timetuple())

        if self.sched_mode == constant.SCHED_HOURLY:
            # current hour with the requested minute/second
            next_dt = datetime.datetime(cur_dt.tm_year, cur_dt.tm_mon,
                                        cur_dt.tm_mday, cur_dt.tm_hour, mn, s,
                                        0)
            timestamp = time.mktime(next_dt.timetuple())

        if self.sched_mode == constant.SCHED_EVERY_X:
            # now plus an (h, mn, s) interval
            next_dt = datetime.datetime(cur_dt.tm_year, cur_dt.tm_mon,
                                        cur_dt.tm_mday, cur_dt.tm_hour,
                                        cur_dt.tm_min, cur_dt.tm_sec, 0)
            next_dt += datetime.timedelta(hours=h, minutes=mn, seconds=s)
            timestamp = time.mktime(next_dt.timetuple())

        if self.sched_mode == constant.SCHED_WEEKLY:
            # walk forward one day at a time until the target weekday
            # (d: 0=Monday .. 6=Sunday, per datetime.weekday())
            next_dt = datetime.datetime(cur_dt.tm_year, cur_dt.tm_mon,
                                        cur_dt.tm_mday, h, mn, s, 0)
            delta = datetime.timedelta(days=1)
            while next_dt.weekday() != d:
                next_dt = next_dt + delta
            timestamp = time.mktime(next_dt.timetuple())

        self.sched_timestamp = timestamp

        # pershap the timestamp is too old
        # compute the next start time
        # only for recursive jobs
        if self.is_recursive():
            if timestamp < time.time():
                self.sched_timestamp = self.get_next_start_time()
    def update_role(self, login, role):
        """Change a user's role and persist the user cache.

        Returns (constant.OK, "success") or (constant.NOT_FOUND, ...)
        when the login is unknown.
        """
        logger.debug('usersmanager - update role for user=%s' % login)

        user = self.search_user(login=login)
        if user is not None:
            # update user in cache and save changes to file
            user[login]["role"] = role
            self.save_users()
            return (constant.OK, "success")

        error_str = "user=%s does not exist" % login
        logger.error('usersmanager - %s' % error_str)
        return (constant.NOT_FOUND, error_str)
# --- scrape artifact removed ("Esempio n. 30" example separator) ---
    def load_workspaces(self):
        """Load the workspaces cache from data/workspaces.yml.

        Raises Exception when the file is missing or its YAML is
        invalid; on success self.cache holds the parsed document.
        """
        wrk_file = '%s/data/workspaces.yml' % (settings.get_app_path())
        if not os.path.isfile(n(wrk_file)):
            raise Exception("yaml workspaces file doesn't exist in data/")

        # load yaml — normalize the path with n() exactly like the
        # isfile check above (previously only the check was normalized)
        with open(n(wrk_file), 'r') as fd:
            wrks_str = fd.read()

        try:
            self.cache = yaml.safe_load(wrks_str)
        except Exception as e:
            raise Exception("bad yaml workspaces file provided: %s" % e)

        logger.debug("workspacesmanager - workspaces cache "
                     "nb items: %s" % len(self.cache["workspaces"]))