Example #1
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        base_dir = act_info['base_dir']
        hosts_info = act_info['hosts_info']
        target_ips = act_info['zookeepernode']
        bk_username = act_info['bk_username']

        zookeeper_hostname_str = get_host_str(target_ips, hosts_info)

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群新zk集群启动",
            "script_content": get_script('hadoop_bamboo/components/collections/script_templates/start_zk_server.sh'),
            "script_param": get_script_param([base_dir, cluster_user, zookeeper_hostname_str]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
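
The helpers get_job_ip_list and get_host_str are imported from elsewhere in the repo and never shown. A minimal sketch of what they plausibly do, assuming the JobV3 convention of {bk_cloud_id, ip} pairs and a default cloud area of 0:

# Hypothetical sketches of the repo helpers used above; the real
# implementations live elsewhere and may differ.
def get_job_ip_list(ips, bk_cloud_id=0):
    """Wrap bare IPs into the {bk_cloud_id, ip} dicts the JobV3 API expects."""
    return [{"bk_cloud_id": bk_cloud_id, "ip": ip} for ip in ips]


def get_host_str(ips, hosts_info):
    """Map each IP to its hostname via hosts_info and join with commas."""
    ip_to_host = {h["ip"]: h["host_name"] for h in hosts_info}
    return ",".join(ip_to_host[ip] for ip in ips if ip in ip_to_host)
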
Example #2
    def fast_execute_script(self, kwargs):
        """
        Fast-execute a script via the JobV3 API.
        """
        # the shell script content must be base64-encoded
        kwargs.update({
            "bk_username": self.bk_username,
        })

        result = JobV3Api.fast_execute_script(
            {
                **kwargs,
                **fast_execute_script_common_kwargs
            }, raw=True)
        if result["result"]:
            query_kwargs = {
                "job_instance_id": result["data"].get("job_instance_id"),
                "bk_biz_id": kwargs.get("bk_biz_id"),
            }
            result = self.get_task_result_status(query_kwargs)
            logger.info(
                build_job_exec_log_format(self.bk_username,
                                          'fast_execute_script',
                                          kwargs['task_name']))
            return result
        else:
            logger.error(
                build_job_err_log_format(self.bk_username,
                                         'fast_execute_script', kwargs,
                                         result))
            return None
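
The comment above notes that the shell script content must be base64-encoded. A minimal sketch of that step, assuming the encoding happens before the content reaches fast_execute_script (it may equally live inside get_script or the common kwargs):

import base64

def encode_script_content(raw_script: str) -> str:
    # JobV3's fast_execute_script expects script_content as base64 text.
    return base64.b64encode(raw_script.encode("utf-8")).decode("utf-8")
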
Example #3
    def fast_push_file(self, kwargs):
        """
        Fast-transfer (push) files via the JobV3 API.
        """

        kwargs.update({
            "bk_username": self.bk_username,
        })

        result = JobV3Api.fast_transfer_file(kwargs, raw=True)
        if result["result"]:
            query_kwargs = {
                "job_instance_id": result["data"].get("job_instance_id"),
                "bk_biz_id": kwargs.get("bk_biz_id"),
            }
            result = self.get_task_result_status(query_kwargs)
            logger.info(
                build_job_exec_log_format(self.bk_username, 'fast_push_file',
                                          kwargs['task_name']))
            return result
        else:
            logger.error(
                build_job_err_log_format(self.bk_username, 'fast_push_file',
                                         kwargs, result))
            return None
Example #4
    def get_task_result_status(self, kwargs):
        """
        Poll a job instance until it finishes and return its result status.
        """

        result = None
        result_message = {}

        kwargs.update({
            "bk_username": self.bk_username,
        })

        for _ in range(100):  # poll at most 100 times
            result = JobV3Api.get_job_instance_status(kwargs, raw=True)
            if result["result"]:
                is_finished = result["data"].get("finished")
                if is_finished:
                    if result["data"].get("job_instance").get("status") != 3:
                        # on failure, capture the error logs so they can be
                        # shown on the frontend
                        step_instance_id = result["data"].get(
                            "step_instance_list")[0]['step_instance_id']
                        kwargs['step_instance_id'] = step_instance_id
                        # bk_cloud_id is assumed to be a module-level constant
                        kwargs['bk_cloud_id'] = bk_cloud_id
                        result_message = self.get_task_ip_log(kwargs)
                    break
                # still running: keep polling
                time.sleep(2)
            else:
                # the status query itself failed: pause, then retry
                time.sleep(2)
        return result, json.dumps(result_message)
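
The status checks in this and later examples treat job_instance status 3 as the terminal success state. A small helper capturing that convention, inferred from these examples:

JOB_STATUS_SUCCESS = 3  # inferred from the status == 3 checks in these examples

def is_job_success(result):
    """True if a finished get_job_instance_status payload reports success."""
    data = result.get("data", {})
    return bool(data.get("finished")) and \
        data.get("job_instance", {}).get("status") == JOB_STATUS_SUCCESS
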
Example #5
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        hdfs_includes = act_info['hdfs_includes']
        hdfs_excludes = act_info['hdfs_excludes']
        bk_username = act_info['bk_username']
        hosts_info = act_info['hosts_info']

        target_ips = [info['ip'] for info in hosts_info]

        add_includes_str = get_host_str(
            [info['ip'] for info in hosts_info if info['process_add'] == 1],
            hosts_info)

        dns_hosts_list = []
        for host in hosts_info:
            if host['add'] == 1:
                dns_hosts_list.append("'{}:{}'".format(host.get("ip"),
                                                       host.get("host_name")))
        dns_hosts_str = ",".join(dns_hosts_list)

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}更新hdfs的include文件配置",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/update_include_config.sh'),
            "script_param": get_script_param(
                [hdfs_includes, hdfs_excludes, add_includes_str, dns_hosts_str]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
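
get_script_param is another unshown repo helper; the scripts evidently receive its output as positional shell arguments. A hypothetical sketch under that assumption:

import shlex

def get_script_param(params):
    # Hypothetical: join the values into one shell-safe string so they
    # arrive as $1, $2, ... inside the script. The real helper may differ.
    return " ".join(shlex.quote(str(p)) for p in params)
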
Example #6
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')
        node_name = data.get_one_of_inputs('node_name')

        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        base_dir = act_info['base_dir']
        name_node_ip_list = act_info['namenode']
        standby_name_node_ip_list = act_info['standbynamenode']
        bk_username = act_info['bk_username']

        if node_name == 'namenode':
            target_ips = name_node_ip_list
        elif node_name == 'standbynamenode':
            target_ips = standby_name_node_ip_list
        else:
            # unrecognized node type: fail fast before calling the Job API
            logger.error(f"无法识别到部署节点类型{node_name}")
            data.outputs.result_message = 'fail'
            return False

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群新{node_name}节点启动",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/start_namenode_server.sh'),
            "script_param": get_script_param(
                [cluster_user, base_dir, node_name, target_ips[0]]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #7
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        cluster_name = act_info['cluster_name']
        cluster_user = act_info['cluster_user']
        hosts_info = act_info['hosts_info']
        bk_username = act_info['bk_username']

        if not any(info['add'] == 1 for info in hosts_info):
            logger_celery.warning("该活动节点没有对应新的ip可以执行,正常返回")
            data.outputs.result_message = "skip"
            return True

        source_ips = [info['ip'] for info in hosts_info]
        target_ips = source_ips

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{cluster_name}节点之间推送公钥文件",
            "file_target_path": f"{HADOOP_PACKAGE_PATH}/ssh/",
            "file_source_list": [{
                "file_list": [f"/home/{cluster_user}/.ssh/id_rsa.pub.*"],
                "account": {"alias": "root"},
                "server": {"ip_list": get_job_ip_list(source_ips)},
            }],
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_transfer_file(
            {**kwargs, **fast_transfer_file_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
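
Every call merges in fast_execute_script_common_kwargs or fast_transfer_file_common_kwargs, which are defined elsewhere in the repo. A plausible sketch of their shape; the field values are assumptions (script_language 1 is shell in the JobV3 API):

# Assumed shape of the shared kwargs; actual values depend on the deployment.
fast_execute_script_common_kwargs = {
    "script_language": 1,  # 1 = shell
    "account_alias": "root",
    "timeout": 7200,
}

fast_transfer_file_common_kwargs = {
    "account_alias": "root",
    "timeout": 7200,
}
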
Example #8
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')
        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        cluster_name = act_info['cluster_name']
        hosts_info = act_info['hosts_info']
        bk_username = act_info['bk_username']
        ssh_port = str(act_info['ssh_port'])

        if not any(info['add'] == 1 for info in hosts_info):
            logger_celery.warning("该活动节点没有对应新的ip可以执行,正常返回")
            data.outputs.result_message = "skip"
            return True

        target_ips = [info['ip'] for info in hosts_info]

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{cluster_name}集群各节点测试免认证",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/check_free_login.sh'),
            "script_param": get_script_param(
                [cluster_user, ssh_port, get_host_str(target_ips, hosts_info)]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #9
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')
        node_name = data.get_one_of_inputs('node_name')
        ops = data.get_one_of_inputs('ops')

        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        base_dir = act_info['base_dir']
        if act_info['task_type'] == 4:
            # datanode scale-out: take the target IPs from hosts_info for
            # compatibility
            target_ips = [
                info['ip'] for info in act_info['hosts_info']
                if info['process_add'] == 1
            ]
        else:
            target_ips = act_info[node_name]
        bk_username = act_info['bk_username']

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群新datanode节点{ops}",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/hadoop_ops.sh'),
            "script_param": get_script_param([base_dir, cluster_user, ops, node_name]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #10
    @staticmethod  # assumed: Example #13 calls this as self.__get_task_ip_log()
    def __get_task_ip_log(kwargs, target_ips):
        """
        Query the per-IP execution logs for a job instance.
        """
        result_message = {}
        for ip in target_ips:
            query_kwargs = dict(kwargs)  # copy so the caller's dict stays clean
            query_kwargs['ip'] = ip
            result = JobV3Api.get_job_instance_ip_log(query_kwargs, raw=True)
            if result["result"]:
                result_message[ip] = result["data"].get("log_content")
            else:
                logger.error('{}:{}'.format(ip, result["message"]))
        return result_message
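
Querying get_job_instance_ip_log once per IP costs a round-trip per host. If the deployed JobV3 exposes batch_get_job_instance_ip_log, a batched variant could cut that down; both the API's availability and the script_task_logs response field are assumptions here:

def get_task_ip_logs_batched(kwargs, target_ips, bk_cloud_id=0):
    # Hypothetical batched variant; fall back to the per-IP loop above if
    # batch_get_job_instance_ip_log is unavailable in the deployed JobV3.
    query_kwargs = dict(kwargs)
    query_kwargs["ip_list"] = [{"bk_cloud_id": bk_cloud_id, "ip": ip}
                               for ip in target_ips]
    result = JobV3Api.batch_get_job_instance_ip_log(query_kwargs, raw=True)
    if not result["result"]:
        logger.error(result["message"])
        return {}
    return {entry["ip"]: entry.get("log_content")
            for entry in result["data"].get("script_task_logs", [])}
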
Example #11
    def execute(self, data, parent_data):
        act_info = data.get_one_of_inputs('act_info')
        target_ips = data.get_one_of_inputs('target_ips')

        bk_username = act_info['bk_username']
        app_id = act_info['app_id']
        version = act_info['version']

        package_full_name_list = [
            es_package_full_name_dict[version]["supervisor"]["package"],
            es_package_full_name_dict[version]["pypy"]["package"],
            es_package_full_name_dict[version]["TencentKona"]["package"],
            es_package_full_name_dict[version]["elasticsearch"]["package"],
        ]

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "file_target_path": '/data',
            "file_source_list": [{
                "file_list": package_full_name_list,
                "account": {"alias": "root"},
                "server": {"ip_list": package_source_ip_list},
            }],
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_transfer_file(
            {**kwargs, **fast_transfer_file_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
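
es_package_full_name_dict is indexed by version and component above but never defined in these examples. A hypothetical shape inferred from the lookups; the paths and versions are placeholders:

# Placeholder sketch; the real mapping lives in the repo's constants module.
es_package_full_name_dict = {
    "7.10.1": {
        "supervisor": {"package": "/data/packages/supervisor.tar.gz"},
        "pypy": {"package": "/data/packages/pypy.tar.gz"},
        "TencentKona": {"package": "/data/packages/TencentKona8.tar.gz"},
        "elasticsearch": {"package": "/data/packages/elasticsearch-7.10.1.tar.gz"},
    },
}
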
Example #12
    def execute(self, data, parent_data):
        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        hdfs_includes = act_info['hdfs_includes']
        hdfs_excludes = act_info['hdfs_excludes']
        bk_username = act_info['bk_username']
        hosts_info = act_info['hosts_info']
        recycle_datanode_ip_list = act_info['datanode']

        target_ips = [info['ip'] for info in hosts_info]

        remove_includes_str = get_host_str(recycle_datanode_ip_list,
                                           hosts_info)

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群datanode节点加入黑名单",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/remove_datanode_config.sh'),
            "script_param": get_script_param(
                [hdfs_includes, hdfs_excludes, remove_includes_str]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #13
    def schedule(self, data, parent_data, callback_data=None):
        act_info = data.get_one_of_inputs('act_info')
        bk_username = act_info['bk_username']
        app_id = act_info['app_id']

        if data.get_one_of_outputs('result_message') == 'skip':
            # this node was skipped internally, so there is nothing to poll
            self.finish_schedule()
            return True

        # whether to fetch the returned logs even when the task succeeds;
        # defaults to False
        is_read_success_message = act_info.get('is_read_success_message')
        if is_read_success_message is None:
            is_read_success_message = False

        target_ips = data.get_one_of_outputs('target_ips')
        job_instance_id = data.get_one_of_outputs('job_instance_id')

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "job_instance_id": job_instance_id,
            "return_ip_result": True,
        }
        res = JobV3Api.get_job_instance_status(kwargs, raw=True)
        if not res['result']:
            return False

        if res['data']['finished']:
            self.finish_schedule()
            step_instance_id = res["data"].get(
                "step_instance_list")[0]['step_instance_id']
            kwargs['step_instance_id'] = step_instance_id
            # bk_cloud_id is assumed to be a module-level constant
            kwargs['bk_cloud_id'] = bk_cloud_id

            if res["data"].get("job_instance").get("status") == 3:
                # the Job task succeeded
                if is_read_success_message:
                    data.outputs.result_message = self.__get_task_ip_log(
                        kwargs=kwargs, target_ips=target_ips)
                else:
                    data.outputs.result_message = "success"
            else:
                # a terminal status other than 3 counts as a failure
                data.outputs.result_message = self.__get_task_ip_log(
                    kwargs=kwargs, target_ips=target_ips)
                return False

        return True
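
These execute()/schedule() pairs follow the pipeline (bamboo-engine) Service contract: execute() launches the job, schedule() is polled until finish_schedule() is called, and returning False fails the node. A minimal skeleton of that pattern; the base-class import path is an assumption:

from pipeline.core.flow.activity import Service  # assumed import path

class JobPollingService(Service):
    __need_schedule__ = True  # opt in to the schedule() polling hook

    def execute(self, data, parent_data):
        # start the job and stash job_instance_id/target_ips in data.outputs
        return True

    def schedule(self, data, parent_data, callback_data=None):
        # poll get_job_instance_status; call self.finish_schedule() once the
        # job has finished, and return False to fail the node
        return True
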
Example #14
    def get_task_ip_log(self, kwargs):
        """
        Query the per-IP execution logs for a job instance.
        """
        result_message = {}

        kwargs.update({"bk_username": self.bk_username})

        for ip in self.ip_list:
            query_kwargs = dict(kwargs)  # copy so iterations don't share state
            query_kwargs['ip'] = ip
            result = JobV3Api.get_job_instance_ip_log(query_kwargs, raw=True)
            if result["result"]:
                result_message[ip] = result["data"].get("log_content")
            else:
                logger.error('{}:{}'.format(ip, result["message"]))
        return result_message
Example #15
    def execute(self, data, parent_data):
        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        base_dir = act_info['base_dir']
        bk_username = act_info['bk_username']
        hosts_info = act_info['hosts_info']
        add_dir_str = ",".join(act_info['scaled_up_dir_list'])
        new_dir_str = "{},{}".format(act_info['old_dir_str'], add_dir_str)

        target_ips = [info['ip'] for info in hosts_info]

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}更新hdfs的数据目录配置",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/update_data_node_dir.sh'),
            "script_param": get_script_param([cluster_user, base_dir, new_dir_str]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #16
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')
        role = data.get_one_of_inputs('role')

        bk_username = act_info['bk_username']
        app_id = act_info['app_id']
        cluster_name = act_info['cluster_name']
        master_str = act_info['master_str']
        http_port = act_info['http_port']
        version = act_info['version']
        target_ips = act_info[f'{role}_list']

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "script_content": get_script(
                'es_bamboo/components/collections/script_templates/install_es.sh'),
            "script_param": get_script_param(
                [cluster_name, master_str, role, http_port, version]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #17
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        target_ips = act_info['datanode']
        cluster_user = act_info['cluster_user']
        add_dir_str = ",".join(act_info['scaled_up_dir_list'])
        bk_username = act_info['bk_username']

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群检测新加目录",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/is_dir_exists.sh'),
            "script_param": get_script_param([add_dir_str, cluster_user]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #18
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')
        node_name = data.get_one_of_inputs('node_name')

        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        bk_username = act_info['bk_username']

        target_ips = act_info[node_name]
        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群检测进程是否正常匹配",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/check_node.sh'),
            "script_param": get_script_param([cluster_user, node_name]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #19
    def execute(self, data, parent_data):
        act_info = data.get_one_of_inputs('act_info')
        bk_username = act_info['bk_username']
        app_id = act_info['app_id']

        account = act_info['account']
        password = act_info['password']
        version = act_info['version']
        target_ips = [act_info['master_list'][0]]

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "script_content": self.__search_guard_init_script(
                version=version,
                superuser=ES_ADMIN,
                super_password=ES_ADMIN_PASSWORD,
                account=account,
                password=password),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #20
    def execute(self, data, parent_data):
        act_info = data.get_one_of_inputs('act_info')

        bk_username = act_info['bk_username']
        app_id = act_info['app_id']
        target_ips = [act_info['target_ip']]
        input_http_url = act_info['input_http_url']
        account = act_info['account']
        password = act_info['password']

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "script_content": get_script(
                'es_bamboo/components/collections/script_templates/get_es_node.sh'),
            "script_param": get_script_param([input_http_url, account, password]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #21
    def execute(self, data, parent_data):
        act_info = data.get_one_of_inputs('act_info')

        bk_username = act_info['bk_username']
        app_id = act_info['app_id']
        target_ips = [act_info['master_list'][0]]

        # wait for the ES process to come up
        time.sleep(80)

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "script_content": get_script(
                'es_bamboo/components/collections/script_templates/search_guard_init.sh'),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
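
The fixed time.sleep(80) is a crude wait for Elasticsearch to come up. A hedged alternative is to poll the node's HTTP port until it accepts connections; the port and timings below are assumptions:

import socket
import time

def wait_for_port(ip, port=9200, timeout=120, interval=5):
    """Poll until ip:port accepts TCP connections, or raise on timeout."""
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            with socket.create_connection((ip, port), timeout=interval):
                return True
        except OSError:
            time.sleep(interval)
    raise TimeoutError("{}:{} not reachable after {}s".format(ip, port, timeout))
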
Example #22
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        base_dir = act_info['base_dir']
        hdfs_includes = act_info['hdfs_includes']
        hdfs_excludes = act_info['hdfs_excludes']
        hosts_info = act_info['hosts_info']
        bk_username = act_info['bk_username']
        replication_number = str(act_info['replication_number'])
        zookeeper_list = act_info['zookeepernode']
        journal_list = act_info['journalnode']
        namenode_list = act_info['namenode']
        standynamenode_list = act_info['standbynamenode']
        datanode_list = act_info['datanode']
        resourcemanger_list = act_info['resourcemanager']
        nodemanger_list = act_info['nodemanager']
        data_dir_str = ",".join(act_info['data_disk_dir_list'])
        ops_type = act_info['ops_type']

        # build the comma-separated hostname string for each role
        zookeeper_hostname_str = get_host_str(zookeeper_list, hosts_info)
        journal_hostname_str = get_host_str(journal_list, hosts_info)
        namenode_hostname_str = get_host_str(namenode_list, hosts_info)
        standynamenode_hostname_str = get_host_str(standynamenode_list,
                                                   hosts_info)
        datanode_hostname_str = get_host_str(datanode_list, hosts_info)
        resourcemangerhostname_str = get_host_str(resourcemanger_list,
                                                  hosts_info)
        nodemanger_hostname_str = get_host_str(nodemanger_list, hosts_info)

        target_ips = [
            info['ip'] for info in hosts_info if info['process_add'] == 1
        ]

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群新节点推送配置过程",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/push_config.sh'),
            "script_param": get_script_param([
                cluster_user,
                base_dir,
                hdfs_includes,
                hdfs_excludes,
                zookeeper_hostname_str,
                journal_hostname_str,
                namenode_hostname_str,
                standynamenode_hostname_str,
                datanode_hostname_str,
                resourcemangerhostname_str,
                nodemanger_hostname_str,
                data_dir_str,
                replication_number,
                ops_type,
            ]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #23
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        bk_username = act_info['bk_username']
        hosts_info = act_info['hosts_info']
        cluster_version = act_info['cluster_version']
        cluster_user = act_info['cluster_user']
        base_dir = act_info['base_dir']
        data_disk_dir_list = act_info['data_disk_dir_list']

        java_file = "{}/java-{}.tar.gz".format(
            HADOOP_PACKAGE_PATH,
            hadoop_package_full_name_dict[cluster_version]["java"]["version"])
        hadoop_file = "{}/hadoop-{}.tar.gz".format(
            HADOOP_PACKAGE_PATH,
            hadoop_package_full_name_dict[cluster_version]["hadoop"]["version"])
        zookeeper_file = "{}/zookeeper-{}.tar.gz".format(
            HADOOP_PACKAGE_PATH,
            hadoop_package_full_name_dict[cluster_version]["zookeeper"]["version"])

        # to survive shell array handling, every string passed down must be
        # wrapped in '' (see dns_hosts_list below)
        hadoop_data_str = ",".join(data_disk_dir_list)

        dns_hosts_list = []
        for host in hosts_info:
            ip = host.get("ip")
            host_name = host.get("host_name")
            dns_hosts_list.append("'{}:{}'".format(ip, host_name))
        dns_hosts_str = ",".join(dns_hosts_list)

        target_ips = [info['ip'] for info in hosts_info if info['add'] == 1]

        if not target_ips:
            logger_celery.warning("该活动节点没有对应新的ip可以执行,正常返回")
            data.outputs.result_message = "skip"
            return True

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群新节点初始化过程",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/init_before_deploy.sh'),
            "script_param": get_script_param([
                cluster_user, base_dir, hadoop_file, java_file, zookeeper_file,
                hadoop_data_str, dns_hosts_str
            ]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script(
            {**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
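
hadoop_package_full_name_dict drives both the file names above and the package list in the next example. A hypothetical shape inferred from the 'version'/'package' lookups; all values are placeholders:

# Placeholder sketch; the real mapping lives in the repo's constants module.
hadoop_package_full_name_dict = {
    "2.6.0": {
        "java": {"version": "1.8.0",
                 "package": "/data/packages/java-1.8.0.tar.gz"},
        "hadoop": {"version": "2.6.0",
                   "package": "/data/packages/hadoop-2.6.0.tar.gz"},
        "zookeeper": {"version": "3.4.6",
                      "package": "/data/packages/zookeeper-3.4.6.tar.gz"},
    },
}
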
Example #24
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        hosts_info = act_info['hosts_info']
        cluster_version = act_info['cluster_version']
        bk_username = act_info['bk_username']

        target_ips = [info['ip'] for info in hosts_info if info['add'] == 1]

        if not target_ips:
            logger_celery.warning("该活动节点没有对应新的ip可以执行,正常返回")
            data.outputs.result_message = "skip"
            return True

        package_full_name_list = [
            hadoop_package_full_name_dict[cluster_version]["hadoop"]["package"],
            hadoop_package_full_name_dict[cluster_version]["java"]["package"],
            hadoop_package_full_name_dict[cluster_version]["zookeeper"]["package"],
        ]

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群新节点安装包分发过程",
            "file_target_path": HADOOP_PACKAGE_PATH,
            "file_source_list": [{
                "file_list": package_full_name_list,
                "account": {"alias": "root"},
                "server": {"ip_list": package_source_ip_list},
            }],
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
            "account_alias": "root",
        }

        res = JobV3Api.fast_transfer_file(
            {**kwargs, **fast_transfer_file_common_kwargs}, raw=True)
        if not res['result']:
            # The Job API call failed; output 'fail' for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']