Example #1
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        cluster_name = act_info['cluster_name']
        cluster_user = act_info['cluster_user']
        hosts_info = act_info['hosts_info']
        bk_username = act_info['bk_username']

        if not any(info['add'] == 1 for info in hosts_info):
            logger_celery.warning("该活动节点没有对应新的ip可以执行,正常返回")
            data.outputs.result_message = "skip"
            return True

        source_ips = [info['ip'] for info in hosts_info]
        target_ips = source_ips

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{cluster_name}节点之间推送公钥文件",
            "file_target_path": f"{HADOOP_PACKAGE_PATH}/ssh/",
            "file_source_list": [{
                "file_list": [f"/home/{cluster_user}/.ssh/id_rsa.pub.*"],
                "account": {"alias": "root"},
                "server": {"ip_list": get_job_ip_list(source_ips)},
            }],
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_transfer_file({**kwargs, **fast_transfer_file_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
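
All of these components wrap bare IP strings with a get_job_ip_list helper before handing them to the Job API. The helper itself is not shown in the excerpts; here is a minimal sketch, assuming the BlueKing Job v3 convention that target_server.ip_list takes {bk_cloud_id, ip} dicts, with the default cloud area (bk_cloud_id = 0) as an assumption:

def get_job_ip_list(ips):
    # Wrap each bare IP in the dict shape Job v3 expects for ip_list;
    # pinning every host to bk_cloud_id 0 is an assumption of this sketch.
    return [{"bk_cloud_id": 0, "ip": ip} for ip in ips]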
Example #2
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        base_dir = act_info['base_dir']
        hosts_info = act_info['hosts_info']
        target_ips = act_info['zookeepernode']
        bk_username = act_info['bk_username']

        zookeeper_hostname_str = get_host_str(target_ips, hosts_info)

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群新zk集群启动",
            "script_content": get_script('hadoop_bamboo/components/collections/script_templates/start_zk_server.sh'),
            "script_param": get_script_param([base_dir, cluster_user, zookeeper_hostname_str]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
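
The script arguments are packed by get_script_param, which is also not shown. Job v3's fast_execute_script expects script_param to be base64-encoded, so a plausible sketch looks like the following; joining the values with spaces as positional shell arguments is an assumption:

import base64

def get_script_param(params):
    # Render the values as space-separated positional shell arguments and
    # base64-encode the result for Job v3's script_param field.
    return base64.b64encode(" ".join(str(p) for p in params).encode("utf-8")).decode("utf-8")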
Example #3
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        hdfs_includes = act_info['hdfs_includes']
        hdfs_excludes = act_info['hdfs_excludes']
        bk_username = act_info['bk_username']
        hosts_info = act_info['hosts_info']

        target_ips = [info['ip'] for info in hosts_info]

        add_includes_str = get_host_str(
            [info['ip'] for info in hosts_info if info['process_add'] == 1],
            hosts_info)

        dns_hosts_list = [
            "'{}:{}'".format(host.get("ip"), host.get("host_name"))
            for host in hosts_info if host['add'] == 1
        ]
        dns_hosts_str = ",".join(dns_hosts_list)

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}更新hdfs的include文件配置",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/update_include_config.sh'),
            "script_param": get_script_param(
                [hdfs_includes, hdfs_excludes, add_includes_str, dns_hosts_str]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
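
get_host_str appears wherever a script wants hostnames instead of IPs, pairing an IP list with the hosts_info records. A sketch under that assumption (the 'ip' and 'host_name' keys match the hosts_info entries used above; the comma-separated output format is assumed):

def get_host_str(ips, hosts_info):
    # Map each IP to the hostname recorded in hosts_info and join the
    # results with commas for consumption by the shell scripts.
    ip_to_hostname = {host['ip']: host['host_name'] for host in hosts_info}
    return ",".join(ip_to_hostname[ip] for ip in ips)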
Example #4
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')
        node_name = data.get_one_of_inputs('node_name')

        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        base_dir = act_info['base_dir']
        name_node_ip_list = act_info['namenode']
        standby_name_node_ip_list = act_info['standbynamenode']
        bk_username = act_info['bk_username']

        if node_name == 'namenode':
            target_ips = name_node_ip_list
        elif node_name == 'standbynamenode':
            target_ips = standby_name_node_ip_list
        else:
            # Unrecognized node type: log it and fail before calling the Job API
            logger.error(f"无法识别到部署节点类型{node_name}")
            data.outputs.result_message = 'fail'
            return False

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群新{node_name}节点启动",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/start_namenode_server.sh'),
            "script_param": get_script_param([cluster_user, base_dir, node_name, target_ips[0]]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
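
Every fast_execute_script call loads its shell template through get_script. Since Job v3 requires script_content to be base64-encoded, a minimal sketch could read the template and encode it; resolving the path against a PROJECT_ROOT constant is purely an assumption of this sketch:

import base64
import os

PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))  # hypothetical root

def get_script(relative_path):
    # Read the script template from the project tree and base64-encode it
    # for Job v3's script_content field.
    with open(os.path.join(PROJECT_ROOT, relative_path), "rb") as f:
        return base64.b64encode(f.read()).decode("utf-8")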
Example #5
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')
        node_name = data.get_one_of_inputs('node_name')
        ops = data.get_one_of_inputs('ops')

        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        base_dir = act_info['base_dir']
        if act_info['task_type'] == 4:
            # For datanode scale-up, the targets are derived as below instead, for compatibility
            target_ips = [
                info['ip'] for info in act_info['hosts_info']
                if info['process_add'] == 1
            ]
        else:
            target_ips = act_info[node_name]
        bk_username = act_info['bk_username']

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群新datanode节点{ops}",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/hadoop_ops.sh'),
            "script_param": get_script_param([base_dir, cluster_user, ops, node_name]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #6
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')
        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        cluster_name = act_info['cluster_name']
        hosts_info = act_info['hosts_info']
        bk_username = act_info['bk_username']
        ssh_port = str(act_info['ssh_port'])

        if not any(info['add'] == 1 for info in hosts_info):
            logger_celery.warning("该活动节点没有对应新的ip可以执行,正常返回")
            data.outputs.result_message = "skip"
            return True

        target_ips = [info['ip'] for info in hosts_info]

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{cluster_name}集群各节点测试免认证",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/check_free_login.sh'),
            "script_param": get_script_param(
                [cluster_user, ssh_port, get_host_str(target_ips, hosts_info)]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
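
Each call merges a module-level fast_execute_script_common_kwargs (or its fast_transfer_file counterpart) into the per-task kwargs. Those shared dicts are not shown; a plausible shape, with field names from the Job v3 API but purely illustrative values, would be:

fast_execute_script_common_kwargs = {
    "script_language": 1,     # 1 = shell in Job v3
    "account_alias": "root",  # assumed execution account
    "timeout": 7200,          # illustrative timeout in seconds
}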
Example #7
File: es_push_pkg.py  Project: pagezhou/DOP
    def execute(self, data, parent_data):
        act_info = data.get_one_of_inputs('act_info')
        target_ips = data.get_one_of_inputs('target_ips')

        bk_username = act_info['bk_username']
        app_id = act_info['app_id']
        version = act_info['version']

        package_full_name_list = [
            es_package_full_name_dict[version]["supervisor"]["package"],
            es_package_full_name_dict[version]["pypy"]["package"],
            es_package_full_name_dict[version]["TencentKona"]["package"],
            es_package_full_name_dict[version]["elasticsearch"]["package"],
        ]

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "file_target_path": '/data',
            "file_source_list": [{
                "file_list": package_full_name_list,
                "account": {"alias": "root"},
                "server": {"ip_list": package_source_ip_list},
            }],
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_transfer_file({**kwargs, **fast_transfer_file_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
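
The es_package_full_name_dict lookup table maps a version to the full package path of each component. Its contents are not shown; the shape implied by the subscripts above is roughly the following, with the version key and paths invented for illustration:

es_package_full_name_dict = {
    "7.10.1": {  # hypothetical version key
        "supervisor": {"package": "/data/packages/supervisor.tar.gz"},
        "pypy": {"package": "/data/packages/pypy.tar.gz"},
        "TencentKona": {"package": "/data/packages/TencentKona.tar.gz"},
        "elasticsearch": {"package": "/data/packages/elasticsearch-7.10.1.tar.gz"},
    },
}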
Example #8
    def execute(self, data, parent_data):
        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        hdfs_includes = act_info['hdfs_includes']
        hdfs_excludes = act_info['hdfs_excludes']
        bk_username = act_info['bk_username']
        hosts_info = act_info['hosts_info']
        recycle_datanode_ip_list = act_info['datanode']

        target_ips = [info['ip'] for info in hosts_info]

        remove_includes_str = get_host_str(recycle_datanode_ip_list,
                                           hosts_info)

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群datanode节点加入黑名单",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/remove_datanode_config.sh'),
            "script_param": get_script_param(
                [hdfs_includes, hdfs_excludes, remove_includes_str]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #9
    def execute(self, data, parent_data):
        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        base_dir = act_info['base_dir']
        bk_username = act_info['bk_username']
        hosts_info = act_info['hosts_info']
        add_dir_str = ",".join(act_info['scaled_up_dir_list'])
        new_dir_str = "{},{}".format(act_info['old_dir_str'], add_dir_str)

        target_ips = [info['ip'] for info in hosts_info]

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}更新hdfs的数据目录配置",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/update_data_node_dir.sh'),
            "script_param": get_script_param([cluster_user, base_dir, new_dir_str]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #10
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')
        role = data.get_one_of_inputs('role')

        bk_username = act_info['bk_username']
        app_id = act_info['app_id']
        cluster_name = act_info['cluster_name']
        master_str = act_info['master_str']
        http_port = act_info['http_port']
        version = act_info['version']
        target_ips = act_info[f'{role}_list']

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "script_content": get_script(
                'es_bamboo/components/collections/script_templates/install_es.sh'),
            "script_param": get_script_param(
                [cluster_name, master_str, role, http_port, version]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #11
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        target_ips = act_info['datanode']
        cluster_user = act_info['cluster_user']
        add_dir_str = ",".join(act_info['scaled_up_dir_list'])
        bk_username = act_info['bk_username']

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群检测新加目录",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/is_dir_exists.sh'),
            "script_param": get_script_param([add_dir_str, cluster_user]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #12
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')
        node_name = data.get_one_of_inputs('node_name')

        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        bk_username = act_info['bk_username']

        target_ips = act_info[node_name]
        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群检测进程是否正常匹配",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/check_node.sh'),
            "script_param": get_script_param([cluster_user, node_name]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
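
These execute methods only launch the Job task; the job_instance_id they stash in data.outputs is what a later polling step would use to decide success. A hedged sketch of such a poll, assuming Job v3's get_job_instance_status endpoint and its convention that status 3 means "finished successfully" (the same check Example #13 below makes):

def job_finished_ok(app_id, bk_username, job_instance_id):
    # Query the Job task once and report whether it succeeded; callers
    # would loop with a delay until the task leaves the running state.
    res = JobV3Api.get_job_instance_status({
        "bk_biz_id": app_id,
        "bk_username": bk_username,
        "job_instance_id": job_instance_id,
    }, raw=True)
    if not res['result']:
        return False
    return res['data']['job_instance']['status'] == 3  # 3 = success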
Example #13
File: topic_ops.py  Project: pagezhou/DOP
def create_topic(bk_username, cluster_name, topic_name):
    """
       定义创建topic方法,属于同步任务
    """
    if not topic_name:
        logger.error("传入的topic名称为空,请检查")
        return False
    app_id = KafkaCluster.objects.get(cluster_name=cluster_name).app_id
    broker_ip = KafkaBroker.objects.filter(
        cluster_name=cluster_name).values('ip')[0]['ip']
    broker_url = broker_ip + ":9092"

    client = JobExecutor(bk_username, [broker_ip])
    result, message = client.fast_execute_script({
        "bk_biz_id": app_id,
        "script_content": get_script(
            'kafka_bamboo/components/collections/script_templates/topic_ops.sh'),
        "script_param": get_script_param([broker_url, topic_name]),
        "target_server": {"ip_list": get_job_ip_list([broker_ip])},
        "task_name": f"{cluster_name}集群创建topic:{topic_name}",
    })

    if result and result["data"].get("job_instance").get("status") == 3:
        # The task succeeded, so update the data record and return normally
        Topic.objects.create(cluster_name=cluster_name,
                             topic=topic_name,
                             create_by=bk_username)
        return True

    # The task failed, so log the error message and return False
    logger.error(message)
    return False
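
A hypothetical call site for the synchronous helper above; the arguments are invented for illustration:

if create_topic("admin", "demo-kafka-cluster", "user-events"):
    print("topic created and recorded")
else:
    print("topic creation failed; see the error log for details")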
Example #14
    def execute(self, data, parent_data):
        act_info = data.get_one_of_inputs('act_info')
        bk_username = act_info['bk_username']
        app_id = act_info['app_id']

        account = act_info['account']
        password = act_info['password']
        version = act_info['version']
        target_ips = [act_info['master_list'][0]]

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "script_content": self.__search_guard_init_script(
                version=version,
                superuser=ES_ADMIN,
                super_password=ES_ADMIN_PASSWORD,
                account=account,
                password=password),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #15
    def execute(self, data, parent_data):
        act_info = data.get_one_of_inputs('act_info')

        bk_username = act_info['bk_username']
        app_id = act_info['app_id']
        target_ips = [act_info['target_ip']]
        input_http_url = act_info['input_http_url']
        account = act_info['account']
        password = act_info['password']

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "script_content": get_script(
                'es_bamboo/components/collections/script_templates/get_es_node.sh'),
            "script_param": get_script_param([input_http_url, account, password]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }
        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #16
    def execute(self, data, parent_data):
        act_info = data.get_one_of_inputs('act_info')

        bk_username = act_info['bk_username']
        app_id = act_info['app_id']
        target_ips = [act_info['master_list'][0]]

        # Wait for the es process to come up
        time.sleep(80)

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "script_content": get_script(
                'es_bamboo/components/collections/script_templates/search_guard_init.sh'),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }
        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #17
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        cluster_user = act_info['cluster_user']
        base_dir = act_info['base_dir']
        hdfs_includes = act_info['hdfs_includes']
        hdfs_excludes = act_info['hdfs_excludes']
        hosts_info = act_info['hosts_info']
        bk_username = act_info['bk_username']
        replication_number = str(act_info['replication_number'])
        zookeeper_list = act_info['zookeepernode']
        journal_list = act_info['journalnode']
        namenode_list = act_info['namenode']
        standbynamenode_list = act_info['standbynamenode']
        datanode_list = act_info['datanode']
        resourcemanager_list = act_info['resourcemanager']
        nodemanager_list = act_info['nodemanager']
        data_dir_str = ",".join(act_info['data_disk_dir_list'])
        ops_type = act_info['ops_type']

        # Build hostname strings for each role
        zookeeper_hostname_str = get_host_str(zookeeper_list, hosts_info)
        journal_hostname_str = get_host_str(journal_list, hosts_info)
        namenode_hostname_str = get_host_str(namenode_list, hosts_info)
        standbynamenode_hostname_str = get_host_str(standbynamenode_list, hosts_info)
        datanode_hostname_str = get_host_str(datanode_list, hosts_info)
        resourcemanager_hostname_str = get_host_str(resourcemanager_list, hosts_info)
        nodemanager_hostname_str = get_host_str(nodemanager_list, hosts_info)

        target_ips = [info['ip'] for info in hosts_info if info['process_add'] == 1]

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群新节点推送配置过程",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/push_config.sh'),
            "script_param": get_script_param([
                cluster_user,
                base_dir,
                hdfs_includes,
                hdfs_excludes,
                zookeeper_hostname_str,
                journal_hostname_str,
                namenode_hostname_str,
                standbynamenode_hostname_str,
                datanode_hostname_str,
                resourcemanager_hostname_str,
                nodemanager_hostname_str,
                data_dir_str,
                replication_number,
                ops_type,
            ]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #18
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        bk_username = act_info['bk_username']
        hosts_info = act_info['hosts_info']
        cluster_version = act_info['cluster_version']
        cluster_user = act_info['cluster_user']
        base_dir = act_info['base_dir']
        data_disk_dir_list = act_info['data_disk_dir_list']

        java_file = "{}/java-{}.tar.gz".format(
            HADOOP_PACKAGE_PATH,
            hadoop_package_full_name_dict[cluster_version]["java"]["version"])
        hadoop_file = "{}/hadoop-{}.tar.gz".format(
            HADOOP_PACKAGE_PATH,
            hadoop_package_full_name_dict[cluster_version]["hadoop"]["version"])
        zookeeper_file = "{}/zookeeper-{}.tar.gz".format(
            HADOOP_PACKAGE_PATH,
            hadoop_package_full_name_dict[cluster_version]["zookeeper"]["version"])

        # To work around shell array iteration, every string must be wrapped in '' before being passed
        hadoop_data_str = ",".join(data_disk_dir_list)

        dns_hosts_list = []
        for host in hosts_info:
            ip = host.get("ip")
            host_name = host.get("host_name")
            dns_hosts_list.append("'{}:{}'".format(ip, host_name))
        dns_hosts_str = ",".join(dns_hosts_list)

        target_ips = [info['ip'] for info in hosts_info if info['add'] == 1]

        if not target_ips:
            logger_celery.warning("该活动节点没有对应新的ip可以执行,正常返回")
            data.outputs.result_message = "skip"
            return True

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群新节点初始化过程",
            "script_content": get_script(
                'hadoop_bamboo/components/collections/script_templates/init_before_deploy.sh'),
            "script_param": get_script_param([
                cluster_user, base_dir, hadoop_file, java_file, zookeeper_file,
                hadoop_data_str, dns_hosts_str
            ]),
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
        }

        res = JobV3Api.fast_execute_script({**kwargs, **fast_execute_script_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']
Example #19
    def execute(self, data, parent_data):

        act_info = data.get_one_of_inputs('act_info')

        app_id = act_info['app_id']
        hosts_info = act_info['hosts_info']
        cluster_version = act_info['cluster_version']
        bk_username = act_info['bk_username']

        target_ips = [info['ip'] for info in hosts_info if info['add'] == 1]

        if not target_ips:
            logger_celery.warning("该活动节点没有对应新的ip可以执行,正常返回")
            data.outputs.result_message = "skip"
            return True

        package_full_name_list = [
            hadoop_package_full_name_dict[cluster_version]["hadoop"]["package"],
            hadoop_package_full_name_dict[cluster_version]["java"]["package"],
            hadoop_package_full_name_dict[cluster_version]["zookeeper"]["package"],
        ]

        kwargs = {
            "bk_biz_id": app_id,
            "bk_username": bk_username,
            "task_name": f"{act_info['cluster_name']}集群新节点安装包分发过程",
            "file_target_path": HADOOP_PACKAGE_PATH,
            "file_source_list": [{
                "file_list": package_full_name_list,
                "account": {"alias": "root"},
                "server": {"ip_list": package_source_ip_list},
            }],
            "target_server": {"ip_list": get_job_ip_list(target_ips)},
            "account_alias": "root",
        }

        res = JobV3Api.fast_transfer_file({**kwargs, **fast_transfer_file_common_kwargs}, raw=True)
        if not res['result']:
            # The Job task call failed; output 'fail' directly for the frontend to display
            data.outputs.result_message = 'fail'
        else:
            job_instance_id = res['data']['job_instance_id']
            data.outputs.job_instance_id = job_instance_id
            data.outputs.target_ips = target_ips
        return res['result']