Example #1
    def success(self, result):
        Service.update_task(task_id=self.task_id,
                            status=Service.STATUS_SUCCESS)

        for ip in self.hosts:
            Service.update_node(node_id=self._node_map[ip],
                                status=Service.STATUS_SUCCESS)
Example #2
File: api.py Project: bppan/opendcp
def parallel_run_task():
    try:
        #read http params
        req_json = request.get_json(force=True, silent=True)
        if req_json is None:
            raise JsonEncodeException
        global_id = request.headers.get("X-CORRELATION-ID")
        if global_id is None:
            Logger.error("Missing X-CORRELATION-ID")
            return return_failed(-1, "X-CORRELATION-ID is empyt"), 400
        source = request.headers.get("X-SOURCE")
        if source is None:
            return return_failed(-1, "X-SOURCE is empyt"), 400
        Logger.debug("Run request json:" + json.dumps(req_json) +
                     str(global_id))
        parallel_nodes = conform_param(req_json, "nodes", list)
        task_name = conform_param(req_json, "name", basestring)
        tasks = conform_param(req_json, "tasks", list)
        tasktype = conform_param(req_json,
                                 "tasktype",
                                 basestring,
                                 default_value="ansible_task")
        params = conform_param(req_json, "params", dict, {}, True)
        user_name = conform_param(req_json, "user", basestring, allowNone=True)
        fork_num = conform_param(req_json, "fork_num", int, allowNone=True)

        headers = {
            'content-type': 'application/json',
            'X-CORRELATION-ID': request.headers.get("X-CORRELATION-ID"),
            'X-SOURCE': request.headers.get("X-SOURCE"),
            'Authorization': 'Basic bmlra2lfdGVzdDAwMkBzaW5hLmNuOjEyMzIyMw==',
            'Cache-Control': 'no-cache'
        }
        # check task name duplicate
        task = Service.get_task_by_name(task_name)
        if task is not None:
            Logger.error("task name is duplicate:" + task_name)
            return return_failed(-1, "task name is duplicate"), 400
        task_id = Service.new_task({"name": task_name})
        return_list = []
        for nodes in parallel_nodes:
            req_json['nodes'] = [nodes]
            req_json['come_from_master'] = 1
            req_json['mtask_id'] = task_id
            r = requests.post("http://%s:8000/api/run" % (nodes),
                              data=json.dumps(req_json),
                              headers=headers,
                              timeout=60)
            return_list.append(r.json()['content']['id'])
        return return_success(content={"id": task_id}), 200
    except JsonEncodeException as e:
        Logger.error("try run_task exception ---------@ ")
        return return_failed(-1, "json encode error"), 400
    except ParamErrorException as e:
        Logger.error("try run_task exception --------------> %s" % (str(e)))
        return return_failed(-1, "param error, error: " + e.message), 400
    except Exception as e:
        Logger.error("try run_task exception --------------> %s" % (str(e)))
        return return_failed(-1, e.message), 500
Example #3
 def runner_on_failed(self, host, res, ignore_errors=False):
     Logger.debug("fail -----------------------------------")
     a = json.dumps(res)  
     Logger.debug(a)
     end_time = datetime.datetime.now()                                 
     end_time = end_time.strftime('%Y-%m-%dT%H:%M:%SZ%z')
     self.log(host, FAILED, res)
     Service.update_log(host=host, global_id=self.global_id, task_uuid=self.task_uuid, task_status="failed",end_time=end_time, data=a) 
Example #4
 def runner_on_unreachable(self, host, res):
     Logger.debug("unreachable -----------------------------------")
     a = json.dumps(res)  
     Logger.debug(a)
     end_time = datetime.datetime.now()
     end_time = end_time.strftime('%Y-%m-%dT%H:%M:%SZ%z')        
     self.log(host, UNREACHABLE, res)
     Service.update_log(host=host, global_id=self.global_id, task_uuid=self.task_uuid, task_status="unreachable", end_time=end_time,data=a )
Example #5
    def runner_on_async_failed(self, host, res, jid):
        Logger.debug("async fail -----------------------------------")
        end_time = datetime.datetime.now()
        end_time = end_time.strftime('%Y-%m-%dT%H:%M:%SZ%z')        
        a = json.dumps(res)  
        Logger.debug(a)

        self.log(host, ASYNC_FAILED, res)
        Service.update_log(host=host, global_id=self.global_id, task_uuid=self.task_uuid, task_status="async_failed", end_time=end_time,data=a )
Example #6
 def runner_on_ok(self, host, res):
     
     Logger.debug("success **********************************")
     a = json.dumps(res)  
     Logger.debug(a)
     end_time = datetime.datetime.now()
     end_time = end_time.strftime('%Y-%m-%dT%H:%M:%SZ%z')        
     self.log(host, OK, res)
     Service.update_log(host=host, global_id=self.global_id, task_uuid=self.task_uuid, task_status="ok", end_time=end_time,data=a )
Example #7
File: api.py Project: bppan/opendcp
def run_task():
    try:

        #read http params
        req_json = request.get_json(force=True, silent=True)
        if req_json is None:
            raise JsonEncodeException
        global_id = request.headers.get("X-CORRELATION-ID")
        if global_id is None:
            Logger.error("Missing X-CORRELATION-ID")
            return return_failed(-1, "X-CORRELATION-ID is empyt"), 400
        source = request.headers.get("X-SOURCE")
        if source is None:
            return return_failed(-1, "X-SOURCE is empyt"), 400
        Logger.debug("Run request json:" + json.dumps(req_json) +
                     str(global_id))
        task_name = conform_param(req_json, "name", basestring)
        nodes = conform_param(req_json, "nodes", list)
        tasks = conform_param(req_json, "tasks", list)
        tasktype = conform_param(req_json,
                                 "tasktype",
                                 basestring,
                                 default_value="ansible_task")
        params = conform_param(req_json, "params", dict, {}, True)
        user_name = conform_param(req_json, "user", basestring, allowNone=True)
        fork_num = conform_param(req_json, "fork_num", int, allowNone=True)
        # check task name duplicate
        task = Service.get_task_by_name(task_name)
        if task is not None:
            Logger.error("task name is duplicate:" + task_name)
            return return_failed(-1, "task name is duplicate"), 400
        task_id = Service.new_task({"name": task_name})

        #submit task
        Worker.submit(
            AnsibleTask(task_id=str(task_id),
                        name=task_name,
                        hosts=nodes,
                        tasks=tasks,
                        tasktype=tasktype,
                        params=params,
                        user=user_name,
                        forks=fork_num,
                        global_id=global_id,
                        source=source,
                        result=""))

        return return_success(content={"id": task_id}), 200
    except JsonEncodeException as e:
        Logger.error("try run_task exception ---------@ ")
        return return_failed(-1, "json encode error"), 400
    except ParamErrorException as e:
        Logger.error("try run_task exception --------------> %s" % (str(e)))
        return return_failed(-1, "param error, error: " + e.message), 400
    except Exception as e:
        Logger.error("try run_task exception --------------> %s" % (str(e)))
        return return_failed(-1, e.message), 500
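
For context, here is a minimal client-side sketch of a request this handler could accept. The endpoint path and port are assumptions taken from the parallel_run_task example above (which posts to http://<node>:8000/api/run); only the field names come from the conform_param calls in run_task, and every value is illustrative:

import json
import requests

# hypothetical values; only the field names are taken from run_task above
headers = {
    "Content-Type": "application/json",
    "X-CORRELATION-ID": "demo-correlation-id",    # required, else 400
    "X-SOURCE": "demo-source",                    # required, else 400
}
body = {
    "name": "demo_task",                          # must not duplicate an existing task
    "nodes": ["192.168.0.10"],                    # target hosts
    "tasks": [{"name": "ping host", "ping": ""}], # playbook-style task dicts (assumed format)
    "tasktype": "ansible_task",                   # default used by the handler
    "params": {"key": "value"},                   # optional extra host variables
    "user": "root",                               # optional
    "fork_num": 5,                                # optional
}
resp = requests.post("http://127.0.0.1:8000/api/run",
                     data=json.dumps(body), headers=headers, timeout=60)
print(resp.status_code, resp.json())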
Example #8
 def playbook_on_task_start(self, name, is_conditional):
     self.task_uuid = uuid.uuid1().hex
     create_time = datetime.datetime.now()
     create_time = create_time.strftime('%Y-%m-%dT%H:%M:%SZ%z')
     for tag in self.tag_hosts:
         Service.add_log(global_id=self.global_id,
                         source=self.source,
                         task_uuid=self.task_uuid,
                         task_status="",
                         create_time=create_time,
                         end_time="",
                         data="",
                         host=tag)
Example #9
 def update_log(self, task_status, detail):
     finish_time = time.strftime(self.TIME_FORMAT, time.localtime())
     for i in range(1, self.max_try + 1):
         try:
             Service.update_log(host=self.host,
                                global_id=self.global_id,
                                task_uuid=self.uid,
                                task_status=task_status,
                                end_time=finish_time,
                                data=detail)
             return
         except Exception as err:
             Logger.error(
                 "Update log err for %d times: %s" % (i, str(err)))
     raise AnsibleError("Update log DB error retry for %d times" %
                        self.max_try)
Example #10
 def failed(self, error):
     err_json = dict(msg=str(error))
     
     #check whether success node exist
     node_list = Service.check_task(task_id=str(self.task_id))
     successflag = False
     for node in node_list:
         if node.status == 2:
             successflag = True
             break
     # update task
     if successflag:
         Service.update_task(task_id=self.task_id,
                             status=Service.STATUS_PartlySuccess,
                             err=json.dumps(err_json))
     else:
         Service.update_task(task_id=self.task_id,
                             status=Service.STATUS_FAILED,
                             err=json.dumps(err_json))
Example #11
    def _step_callback(self, ip, code, data=None):
        try:

            logdata = []
            node = Service.get_node_by_id(node_id=self._node_map[ip])
            if (node is not None) and (node.log is not None):
                temp = eval(node.log)
                Logger.debug("node.log:" + node.log)
                if isinstance(temp, list):
                    logdata = temp
                elif isinstance(temp, dict):
                    logdata = [temp]
            logdata = json.dumps(logdata)
            if code < 0:
                Service.update_node(node_id=self._node_map[ip],
                                    status=Service.STATUS_FAILED,
                                    log=logdata)
            elif code == 0:
                Service.update_node(node_id=self._node_map[ip],
                                    status=Service.STATUS_SUCCESS,
                                    log=logdata)
            else:
                Service.update_node(node_id=self._node_map[ip],
                                    status=Service.STATUS_RUNNING,
                                    log=logdata)
        except Exception as e:
            Logger.info(
                "step callback falied, ip: {}, error: {}, global id: {}".
                format(ip, e.message, self.global_id))
Example #12
 def add_log(self, task_status, detail):
     begin_time = time.strftime(self.TIME_FORMAT, time.localtime())
     for i in range(1, self.max_try + 1):
         try:
             Service.add_log(global_id=self.global_id,
                             source=self.source,
                             task_uuid=self.uid,
                             task_status=task_status,
                             create_time=begin_time,
                             end_time="",
                             data=detail,
                             host=self.host,
                             task_id=self.task_id)
             return
         except Exception as err:
             Logger.error('Add log err for %d times: %s' % (i, str(err)))
     raise AnsibleError("Add log DB error retry for %d times" %
                        self.max_try)
Example #13
File: api.py Project: bppan/opendcp
def stop_task():
    try:
        #read http params
        req_json = request.get_json(force=True, silent=True)
        if req_json is None:
            raise JsonEncodeException
        global_id = request.headers["X-CORRELATION-ID"]
        if global_id is None:
            return_failed(-1, "X-CORRELATION-ID is Empty"), 400
        source = request.headers["X-SOURCE"]
        if source is None:
            return_failed(-1, "X-SOURCE is Empty"), 400

        Logger.debug("Stop request json:" + json.dumps(req_json) +
                     str(global_id))
        task_id = conform_param(req_json, "id", param_type=int, allowNone=True)
        task_name = conform_param(req_json,
                                  "name",
                                  param_type=basestring,
                                  allowNone=True)

        #stop task by id
        if task_id is None:
            if task_name is None:
                raise ParamErrorException("key task_id/task_name not found")
            else:
                task = Service.get_task_by_name(task_name)
                if task is None:
                    return return_failed(-1, "task not found"), 404
                task_id = task.id

        Worker.stop(str(task_id))

        # update task status
        Service.update_task(task_id, status=Service.STATUS_STOPPED)

        return return_success(), 200
    except JsonEncodeException:
        Logger.error("try stop_task exception --------------@")
        return return_failed(-1, "json encode error"), 400
    except Exception as e:
        Logger.error("try stop_task exception --------------> %s" % (str(e)))
        return return_failed(-1, e.message), 400
Example #14
def getlog():
    try:
        #read http params
        req_json = request.get_json(force=True, silent=True)
        if req_json is None:
            raise JsonEncodeException
        global_id = request.headers["X-CORRELATION-ID"]
        if global_id is None:
            Logger.error("X-CORRELATION-ID is Empty")
            return_failed(-1, "X-CORRELATION-ID is Empty"), 400
        source = request.headers["X-SOURCE"]
        if source is None:
            Logger.error("X-CORRELATION-ID is Empty")
            return_failed(-1, "X-SOURCE is Empty"), 400
        Logger.debug("Check request json:" + json.dumps(req_json) +
                     str(global_id))
        host = conform_param(req_json,
                             "host",
                             param_type=basestring,
                             allowNone=True)
        source = conform_param(req_json,
                               "source",
                               param_type=basestring,
                               allowNone=True)
        logs = None
        #load task by id or name
        if host is None:
            if source is None:
                raise ParamErrorException("source  not found")
            else:
                logs = Service.get_log_by_globalid_source_host(
                    global_id, source)
                if logs is None:
                    return return_failed(
                        -1, "no log found for specified name:" + source), 404
        else:
            logs = Service.get_log_by_globalid_source_host(
                global_id, source, host)
            if logs is None:
                return return_failed(
                    -1, "no task found for specified global_id source host:" +
                    str(global_id)), 404
        #return status data
        ret_log = []

        # ret_log.append(dict(
        #     global_id=log.global_id,
        #     source=log.source,
        #     log=json.loads(log.log),
        # ))
        for log in logs:
            if log.task_status == "failed":
                tmps = "global_id = %s, source = %s, create_time = %s, end_time = %s, host = %s, reuslt = %s " % (
                    log.global_id, log.source, log.create_time, log.end_time,
                    log.host, log.task_status) + "\n\t"
                redict = json.loads(log.log)
                if "results" not in redict.keys():
                    tmps += "message: "
                    if "msg" in redict.keys():
                        tmps += redict["msg"]
                    elif "stderr" in redict.keys():
                        tmps += redict["stderr"]
                    else:
                        tmps += "no error msg out"
                    tmps += "\n\t"
                    ret_log.append(tmps)
                    continue
                for i in redict["results"]:
                    if "msg" in redict.keys():
                        #if i["msg"] != "":
                        tmps += "message: "
                        tmps += i["msg"]
                        tmps += "\n\t"
                ret_log.append(tmps)
                continue
            if log.task_status == "unreacheable":
                tmps = "global_id = %s, source = %s, create_time = %s, end_time = %s, host = %s, reuslt = %s " % (
                    log.global_id, log.source, log.create_time, log.end_time,
                    log.host, log.task_status) + "\n\t"
                if "results" not in redict.keys():
                    tmps += "message: "
                    if "msg" in redict.keys():
                        tmps += redict["msg"]
                    elif "stderr" in redict.keys():
                        tmps += redict["stderr"]
                    else:
                        tmps += "no error msg out"
                    tmps += "\n\t"
                ret_log.append(tmps)
                continue
            ret_log.append(
                "global_id = %s, source = %s, create_time = %s, end_time = %s, host = %s, reuslt = %s "
                % (log.global_id, log.source, log.create_time, log.end_time,
                   log.host, log.task_status))
        return return_success(content={"log": ret_log}), 200
    except JsonEncodeException:
        Logger.error("try getlog exception --------------@")
        return return_failed(-1, "json encode error"), 400
    except Exception as e:
        Logger.error("try getlog exception --------------> %s" % (str(e)))
        return return_failed(-1, e.message), 500
Example #15
    def run(self):
        # insert node
        for ip in self.hosts:
            self._node_map[ip] = Service.new_node(self.task_id, ip)

        variable_manager = VariableManager()

        Logger.debug("start write ssh_key for task: {} global_id : {}".format(
            self.task_id, self.global_id))

        key_files = []
        group = Group(self.task_id)

        for h in self.hosts:
            # get ssh_key content
            key_content = _get_ssh_key(h)
            Logger.debug("read ssh_key for host: {} global_id: {}".format(
                h, self.global_id))

            # write ssh private key
            key_path = _write_ssh_key(h, key_content)
            #key_path="./tmp/97"
            Logger.debug("write ssh_key for host: {} global_id: {}".format(
                h, self.global_id))

            host_vars = dict(ansible_port=22,
                             ansible_user=self.user,
                             ansible_ssh_private_key_file="./" + key_path)
            Logger.debug("key_path: {} global_id: {}".format(
                key_path, self.global_id))

            key_files.append(key_path)
            host = Host(h)
            host.vars = host_vars
            group.add_host(host)

        # add params to each host
        if self.params is not None and isinstance(self.params, dict):
            for h in group.hosts:
                for key in self.params.keys():
                    variable_manager.set_host_variable(h, key,
                                                       self.params[key])

        Logger.debug("success write ssh_key for task: {} global_id: {}".format(
            self.task_id, self.global_id))

        # other options
        ssh_args = '-oControlMaster=auto -oControlPersist=60s -oStrictHostKeyChecking=no'
        options = _Options(connection='ssh',
                           module_path='./ansible/library',
                           forks=self.forks,
                           timeout=10,
                           remote_user=None,
                           private_key_file=None,
                           ssh_common_args=ssh_args,
                           ssh_extra_args=None,
                           sftp_extra_args=None,
                           scp_extra_args=None,
                           become=None,
                           become_method=None,
                           become_user=None,
                           verbosity=None,
                           check=False)

        if self.tasktype == "ansible_task":
            Logger.debug(
                "ansible tasks set*******************  global_id: {}".format(
                    self.global_id))
            play_source = dict(name=self.task_id,
                               hosts=self.task_id,
                               gather_facts='yes',
                               tasks=self.tasks)
        else:

            Logger.debug(
                "ansible role set******************* global_id: {}".format(
                    self.global_id))
            play_source = dict(name=self.task_id,
                               hosts=self.task_id,
                               gather_facts='yes',
                               roles=self.tasks)

        Logger.debug("start load play for task: {} global_id: {}".format(
            self.task_id, self.global_id))

        # make playbook
        playbook = Play().load(play_source,
                               variable_manager=variable_manager,
                               loader=_Loader)

        inventory = Inventory(loader=_Loader,
                              variable_manager=variable_manager)

        inventory.add_group(group)

        call_back = SyncCallbackModule(debug=True,
                                       step_callback=self._step_callback,
                                       global_id=self.global_id,
                                       source=self.source,
                                       tag_hosts=self.hosts)

        Logger.debug("success load play for task: {} global_id: {}".format(
            self.task_id, self.global_id))

        # task queue
        tqm = TaskQueueManager(inventory=inventory,
                               variable_manager=variable_manager,
                               loader=_Loader,
                               options=options,
                               passwords=None,
                               stdout_callback=call_back)

        try:
            back = tqm.run(playbook)

            Logger.info("back: {} global_id : {}".format(
                str(back), self.global_id))

            if back != 0:
                raise Exception("playbook run failed")

            return back
        finally:
            if tqm is not None:
                tqm.cleanup()
                _rm_tmp_key(key_files)
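
A hedged sketch of the shape self.tasks might take when tasktype is "ansible_task": play_source passes it straight through to Play().load(), which in the Ansible 2.x Python API accepts ordinary playbook-style task dicts. The module names and arguments below are illustrative only:

# illustrative task list for the ansible_task branch above (assumed format)
tasks = [
    dict(name="collect date", action=dict(module="command", args="date")),
    dict(name="say hello", action=dict(module="shell", args="echo hello")),
]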
Example #16
File: api.py Project: bppan/opendcp
def getlog():
    try:
        #read http params
        req_json = request.get_json(force=True, silent=True)
        if req_json is None:
            raise JsonEncodeException
        global_id = request.headers["X-CORRELATION-ID"]
        if global_id is None:
            Logger.error("X-CORRELATION-ID is Empty")
            return_failed(-1, "X-CORRELATION-ID is Empty"), 400
        source = request.headers["X-SOURCE"]
        if source is None:
            Logger.error("X-CORRELATION-ID is Empty")
            return_failed(-1, "X-SOURCE is Empty"), 400
        Logger.debug("Check request json:" + json.dumps(req_json) +
                     str(global_id))
        host = conform_param(req_json,
                             "host",
                             param_type=basestring,
                             allowNone=True)
        source = conform_param(req_json,
                               "source",
                               param_type=basestring,
                               allowNone=True)
        logs = None
        #load task by id or name
        if host is None:
            if source is None:
                raise ParamErrorException("source  not found")
            else:
                logs = Service.get_log_by_globalid_source_host(
                    global_id, source)
                if logs is None:
                    return return_failed(
                        -1, "no log found for specified name:" + source), 404
        else:
            logs = Service.get_log_by_globalid_source_host(
                global_id, source, host)
            if logs is None:
                return return_failed(
                    -1, "no task found for specified global_id source host:" +
                    str(global_id)), 404

        ret_log = []
        ok_dict = {
            AnsibleTask.STEP_SUBMIT:
            '1.Submit ansible task and be ready to start OK',
            AnsibleTask.STEP_SSH: '2.Download ssh_keys OK',
            AnsibleTask.STEP_INIT: '3.Init ansible config OK',
            AnsibleTask.STEP_LOAD: '4.Load play for task OK',
            AnsibleTask.STEP_RUN: '5.Begin execute play for task'
        }
        for log in logs:
            task = Service.get_task_by_id(log.task_id)
            # failed_dict = {
            #     AnsibleTask.STEP_SUBMIT: '1.Submmit ansible task and be ready to start ...',AnsibleTask.STEP_SSH:'2.Download ssh_keys ...',
            #     AnsibleTask.STEP_INIT: '3.Init ansible config ...', AnsibleTask.STEP_LOAD: '4.Load play for task ...',
            #     AnsibleTask.STEP_RUN: '5.Execute play for task ...'
            # }
            step_ret = []

            i = AnsibleTask.STEP_SUBMIT
            while i <= task.step:
                step_ret.append(ok_dict[i])
                i += 1
            # if task.step < AnsibleTask.STEP_RUN:
            #     step_ret.append(failed_dict[task.step+1])
            ret_log.extend(step_ret)
            ret_log.append(
                "global_id = %s, source = %s, create_time = %s, end_time = %s, host = %s, result = %s "
                % (log.global_id, log.source, log.create_time, log.end_time,
                   log.host, log.task_status))

            if log.task_status == "failed":
                redict = json.loads(log.log)
                tmps = "--->Run task failed!!!<--- \n\t"
                if "results" not in redict.keys():
                    tmps += "message: "
                    if "msg" in redict.keys():
                        tmps += redict["msg"]
                    elif "stderr" in redict.keys():
                        tmps += redict["stderr"]
                    else:
                        tmps += "no error msg out,maybe db op error"
                    tmps += "\n\t"
                    ret_log.append(tmps)
                    continue
                for i in redict["results"]:
                    if "msg" in redict.keys():
                        #if i["msg"] != "":
                        tmps += "message: "
                        tmps += i["msg"]
                        tmps += "\n\t"
                ret_log.append(tmps)
                continue
            if log.task_status == "unreacheable":
                tmps = "--->Run task unreacheable!!!<--- \n\t"
                tmps += log.log
                tmps += "\n\t"
                ret_log.append(tmps)
                continue

            if log.task_status == "ok":
                tmps = "--->Run task ok!!!<--- \n\t"
                tmps += log.log
                tmps += "\n\t"
                ret_log.append(tmps)
                continue
            if log.task_status == "start":
                tmps = log.log
                tmps += "\n\t"
                ret_log.append(tmps)
                continue
        return return_success(content={"log": ret_log}), 200
    except JsonEncodeException:
        Logger.error("try getlog exception --------------@")
        return return_failed(-1, "json encode error"), 400
    except Exception as e:
        Logger.error("try getlog exception --------------$> %s" % (str(e)))
        return return_failed(-1, e.message), 500
Example #17
File: api.py Project: bppan/opendcp
def check_task():
    try:
        #read http params
        req_json = request.get_json(force=True, silent=True)
        if req_json is None:
            raise JsonEncodeException
        global_id = request.headers["X-CORRELATION-ID"]
        if global_id is None:
            return_failed(-1, "X-CORRELATION-ID is Empty"), 400
        source = request.headers["X-SOURCE"]
        if source is None:
            return_failed(-1, "X-SOURCE is Empty"), 400

        Logger.debug("Check request json:" + json.dumps(req_json) +
                     str(global_id))
        task_id = conform_param(req_json, "id", param_type=int, allowNone=True)
        task_name = conform_param(req_json,
                                  "name",
                                  param_type=basestring,
                                  allowNone=True)

        #load task by id or name
        if task_id is None:
            if task_name is None:
                raise ParamErrorException("key task_id/task_name not found")
            else:
                task = Service.get_task_by_name(task_name)
                if task is None:
                    return return_failed(
                        -1,
                        "no task found for specified name:" + task_name), 404
        else:
            task = Service.get_task_by_id(task_id)
            if task is None:
                return return_failed(
                    -1, "no task found for specified id:" + str(task_id)), 404

        node_list = Service.check_task(task_id=str(task.id))

        #return status data
        ret_node = []
        for node in node_list:
            ret_node.append(
                dict(
                    ip=node.ip,
                    status=node.status,
                    #log=json.loads(node.log),
                    log=node.log,
                ))

        ret_task = dict(
            id=task.id,
            status=task.status,
            err=task.err,
        )

        return return_success(content={
            "task": ret_task,
            "nodes": ret_node
        }), 200
    except JsonEncodeException:
        Logger.error("try check_task exception --------------@")
        return return_failed(-1, "json encode error"), 400
    except Exception as e:
        Logger.error("try check_task exception -------------->$ %s" % (str(e)))
        return return_failed(-1, e.message), 500
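
For reference, a sketch of the content payload this handler assembles before handing it to return_success; the concrete values (and whatever numeric codes the Service.STATUS_* constants map to) are illustrative assumptions:

# illustrative shape of the "content" dict built by check_task above
example_content = {
    "task": {"id": 42, "status": 2, "err": None},           # values assumed
    "nodes": [
        {"ip": "192.168.0.10", "status": 2, "log": "[]"},   # node.log kept as raw string
    ],
}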